from __future__ import annotations

import unittest

from transformers import is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow


if is_tf_available():
    import tensorflow as tf

    from transformers import AutoTokenizer, TFAutoModelForSeq2SeqLM


@require_tf
@require_sentencepiece
@require_tokenizers
class TFMT5ModelIntegrationTest(unittest.TestCase):
    @slow
    def test_small_integration_test(self):
        """Check that google/mt5-small reproduces a known per-token score."""
        model = TFAutoModelForSeq2SeqLM.from_pretrained("google/mt5-small")
        tokenizer = AutoTokenizer.from_pretrained("google/mt5-small")

        input_ids = tokenizer("Hello there", return_tensors="tf").input_ids
        labels = tokenizer("Hi I am", return_tensors="tf").input_ids

        loss = model(input_ids, labels=labels).loss
        mtf_score = -tf.math.reduce_mean(loss).numpy()

        EXPECTED_SCORE = -21.228168
        self.assertTrue(abs(mtf_score - EXPECTED_SCORE) < 2e-4)
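

# Hedged usage sketch (added; not part of the original test): the same score can
# be reproduced outside the unittest harness with standard transformers/TF calls.
if __name__ == "__main__":
    if is_tf_available():
        model = TFAutoModelForSeq2SeqLM.from_pretrained("google/mt5-small")
        tokenizer = AutoTokenizer.from_pretrained("google/mt5-small")
        loss = model(
            tokenizer("Hello there", return_tensors="tf").input_ids,
            labels=tokenizer("Hi I am", return_tensors="tf").input_ids,
        ).loss
        # The score is the negated mean cross-entropy over the label tokens.
        print(-tf.math.reduce_mean(loss).numpy())  # expected to be close to -21.228168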
"""simple docstring"""
def snake_case_ ( A_ : list[list] ):
'''simple docstring'''
_lowerCamelCase : Optional[int] = current_set.copy()
for row_index, row in enumerate(A_ ):
_lowerCamelCase : Tuple = row[0]
for column_index, column in enumerate(A_ ):
if magnitude == 0:
_lowerCamelCase : List[Any] = column
continue
_lowerCamelCase : List[Any] = column / magnitude
# Subtract to cancel term
_lowerCamelCase : Union[str, Any] = current_set[0]
_lowerCamelCase : Dict = [first_row]
_lowerCamelCase : str = current_set[1::]
for row in current_set:
_lowerCamelCase : Union[str, Any] = []
# If first term is 0, it is already in form we want, so we preserve it
if row[0] == 0:
final_set.append(A_ )
continue
for column_index in range(len(A_ ) ):
temp_row.append(first_row[column_index] - row[column_index] )
final_set.append(A_ )
# Create next recursion iteration set
if len(final_set[0] ) != 3:
_lowerCamelCase : Any = final_set[0]
_lowerCamelCase : Any = []
_lowerCamelCase : Optional[int] = []
for row in final_set[1::]:
current_first_column.append(row[0] )
next_iteration.append(row[1::] )
_lowerCamelCase : Dict = simplify(A_ )
for i in range(len(A_ ) ):
resultant[i].insert(0, current_first_column[i] )
resultant.insert(0, A_ )
_lowerCamelCase : Tuple = resultant
return final_set
def snake_case_ ( A_ : list[list] ):
'''simple docstring'''
if len(A_ ) == 0:
raise IndexError('''solve_simultaneous() requires n lists of length n+1''' )
_lowerCamelCase : Dict = len(A_ ) + 1
if any(len(A_ ) != _length for item in equations ):
raise IndexError('''solve_simultaneous() requires n lists of length n+1''' )
for row in equations:
if any(not isinstance(A_, (int, float) ) for column in row ):
raise ValueError('''solve_simultaneous() requires lists of integers''' )
if len(A_ ) == 1:
return [equations[0][-1] / equations[0][0]]
_lowerCamelCase : Optional[Any] = equations.copy()
if any(0 in row for row in data_set ):
_lowerCamelCase : str = data_set.copy()
_lowerCamelCase : List[Any] = []
for row_index, row in enumerate(A_ ):
if 0 not in row:
_lowerCamelCase : Union[str, Any] = data_set.pop(A_ )
break
if not full_row:
raise ValueError('''solve_simultaneous() requires at least 1 full equation''' )
data_set.insert(0, A_ )
_lowerCamelCase : List[str] = data_set.copy()
_lowerCamelCase : int = simplify(A_ )
_lowerCamelCase : int = simplified[::-1]
_lowerCamelCase : list = []
for row in simplified:
_lowerCamelCase : Tuple = row[-1]
if not solutions:
if row[-2] == 0:
solutions.append(0 )
continue
solutions.append(current_solution / row[-2] )
continue
_lowerCamelCase : Optional[Any] = row.copy()[: len(A_ ) - 1 :]
while temp_row[0] == 0:
temp_row.pop(0 )
if len(A_ ) == 0:
solutions.append(0 )
continue
_lowerCamelCase : Tuple = temp_row[1::]
_lowerCamelCase : Tuple = temp_row[::-1]
for column_index, column in enumerate(A_ ):
current_solution -= column * solutions[column_index]
solutions.append(A_ )
_lowerCamelCase : Optional[int] = []
for item in solutions:
final.append(float(round(A_, 5 ) ) )
return final[::-1]
if __name__ == "__main__":
import doctest
doctest.testmod()
lowerCAmelCase__ = [
[2, 1, 1, 1, 1, 4],
[1, 2, 1, 1, 1, 5],
[1, 1, 2, 1, 1, 6],
[1, 1, 1, 2, 1, 7],
[1, 1, 1, 1, 2, 8],
]
print(solve_simultaneous(eq))
print(solve_simultaneous([[4, 2]]))
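
    # Worked example (added for illustration): x + 2y = 3 and 4x + 5y = 6 have
    # the unique solution x = -1, y = 2; each row is [coefficients..., constant].
    print(solve_simultaneous([[1, 2, 3], [4, 5, 6]]))  # [-1.0, 2.0]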
from typing import TYPE_CHECKING

from ...file_utils import _LazyModule, is_torch_available
from ...utils import OptionalDependencyNotAvailable


_import_structure = {
    "configuration_gpt_neox_japanese": ["GPT_NEOX_JAPANESE_PRETRAINED_CONFIG_ARCHIVE_MAP", "GPTNeoXJapaneseConfig"],
    "tokenization_gpt_neox_japanese": ["GPTNeoXJapaneseTokenizer"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_gpt_neox_japanese"] = [
        "GPT_NEOX_JAPANESE_PRETRAINED_MODEL_ARCHIVE_LIST",
        "GPTNeoXJapaneseForCausalLM",
        "GPTNeoXJapaneseLayer",
        "GPTNeoXJapaneseModel",
        "GPTNeoXJapanesePreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_gpt_neox_japanese import GPT_NEOX_JAPANESE_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTNeoXJapaneseConfig
    from .tokenization_gpt_neox_japanese import GPTNeoXJapaneseTokenizer

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_gpt_neox_japanese import (
            GPT_NEOX_JAPANESE_PRETRAINED_MODEL_ARCHIVE_LIST,
            GPTNeoXJapaneseForCausalLM,
            GPTNeoXJapaneseLayer,
            GPTNeoXJapaneseModel,
            GPTNeoXJapanesePreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
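
# Hedged usage note (added; not part of the module): with _LazyModule installed in
# sys.modules, the submodules above are only imported on first attribute access, e.g.:
#
#     from transformers.models.gpt_neox_japanese import GPTNeoXJapaneseConfig
#     config = GPTNeoXJapaneseConfig()  # triggers the real import lazily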
"""simple docstring"""
import numpy as np
from sklearn.datasets import fetch_california_housing
from sklearn.metrics import mean_absolute_error, mean_squared_error
from sklearn.model_selection import train_test_split
from xgboost import XGBRegressor
def _lowerCAmelCase ( lowerCAmelCase ):
'''simple docstring'''
return (data["data"], data["target"])
def _lowerCAmelCase ( lowerCAmelCase , lowerCAmelCase , lowerCAmelCase ):
'''simple docstring'''
UpperCAmelCase = XGBRegressor(verbosity=0 , random_state=42 )
xgb.fit(lowerCAmelCase , lowerCAmelCase )
# Predict target for test data
UpperCAmelCase = xgb.predict(lowerCAmelCase )
UpperCAmelCase = predictions.reshape(len(lowerCAmelCase ) , 1 )
return predictions
def _lowerCAmelCase ( ):
'''simple docstring'''
UpperCAmelCase = fetch_california_housing()
UpperCAmelCase , UpperCAmelCase = data_handling(lowerCAmelCase )
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase = train_test_split(
lowerCAmelCase , lowerCAmelCase , test_size=0.25 , random_state=1 )
UpperCAmelCase = xgboost(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase )
# Error printing
print(F'''Mean Absolute Error : {mean_absolute_error(lowerCAmelCase , lowerCAmelCase )}''' )
print(F'''Mean Square Error : {mean_squared_error(lowerCAmelCase , lowerCAmelCase )}''' )
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True)
main()
"""simple docstring"""
def _lowerCamelCase ( _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ):
'''simple docstring'''
def update_area_of_max_square(_UpperCamelCase , _UpperCamelCase ) -> int:
# BASE CASE
if row >= rows or col >= cols:
return 0
__lowerCAmelCase = update_area_of_max_square(_UpperCamelCase , col + 1 )
__lowerCAmelCase = update_area_of_max_square(row + 1 , col + 1 )
__lowerCAmelCase = update_area_of_max_square(row + 1 , _UpperCamelCase )
if mat[row][col]:
__lowerCAmelCase = 1 + min([right, diagonal, down] )
__lowerCAmelCase = max(largest_square_area[0] , _UpperCamelCase )
return sub_problem_sol
else:
return 0
__lowerCAmelCase = [0]
update_area_of_max_square(0 , 0 )
return largest_square_area[0]
def _lowerCamelCase ( _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ):
'''simple docstring'''
def update_area_of_max_square_using_dp_array(
_UpperCamelCase , _UpperCamelCase , _UpperCamelCase ) -> int:
if row >= rows or col >= cols:
return 0
if dp_array[row][col] != -1:
return dp_array[row][col]
__lowerCAmelCase = update_area_of_max_square_using_dp_array(_UpperCamelCase , col + 1 , _UpperCamelCase )
__lowerCAmelCase = update_area_of_max_square_using_dp_array(row + 1 , col + 1 , _UpperCamelCase )
__lowerCAmelCase = update_area_of_max_square_using_dp_array(row + 1 , _UpperCamelCase , _UpperCamelCase )
if mat[row][col]:
__lowerCAmelCase = 1 + min([right, diagonal, down] )
__lowerCAmelCase = max(largest_square_area[0] , _UpperCamelCase )
__lowerCAmelCase = sub_problem_sol
return sub_problem_sol
else:
return 0
__lowerCAmelCase = [0]
__lowerCAmelCase = [[-1] * cols for _ in range(_UpperCamelCase )]
update_area_of_max_square_using_dp_array(0 , 0 , _UpperCamelCase )
return largest_square_area[0]
def _lowerCamelCase ( _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ):
'''simple docstring'''
__lowerCAmelCase = [[0] * (cols + 1) for _ in range(rows + 1 )]
__lowerCAmelCase = 0
for row in range(rows - 1 , -1 , -1 ):
for col in range(cols - 1 , -1 , -1 ):
__lowerCAmelCase = dp_array[row][col + 1]
__lowerCAmelCase = dp_array[row + 1][col + 1]
__lowerCAmelCase = dp_array[row + 1][col]
if mat[row][col] == 1:
__lowerCAmelCase = 1 + min(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
__lowerCAmelCase = max(dp_array[row][col] , _UpperCamelCase )
else:
__lowerCAmelCase = 0
return largest_square_area
def _lowerCamelCase ( _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ):
'''simple docstring'''
__lowerCAmelCase = [0] * (cols + 1)
__lowerCAmelCase = [0] * (cols + 1)
__lowerCAmelCase = 0
for row in range(rows - 1 , -1 , -1 ):
for col in range(cols - 1 , -1 , -1 ):
__lowerCAmelCase = current_row[col + 1]
__lowerCAmelCase = next_row[col + 1]
__lowerCAmelCase = next_row[col]
if mat[row][col] == 1:
__lowerCAmelCase = 1 + min(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
__lowerCAmelCase = max(current_row[col] , _UpperCamelCase )
else:
__lowerCAmelCase = 0
__lowerCAmelCase = current_row
return largest_square_area
if __name__ == "__main__":
import doctest
doctest.testmod()
print(largest_square_area_in_matrix_bottom_up(2, 2, [[1, 1], [1, 1]]))
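
    # Extra check (added for illustration): a 3x3 grid whose largest all-ones
    # square has side 2. Note the functions return the side length of that
    # square, despite the "area" in their names.
    mat = [[1, 1, 0], [1, 1, 1], [0, 1, 1]]
    print(largest_square_area_in_matrix_bottom_up(3, 3, mat))  # 2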
import os
import unittest
from transformers.models.transfo_xl.tokenization_transfo_xl import VOCAB_FILES_NAMES, TransfoXLTokenizer
from ...test_tokenization_common import TokenizerTesterMixin
class TransfoXLTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = TransfoXLTokenizer
    test_rust_tokenizer = False
    test_seq2seq = False

    def setUp(self):
        super().setUp()

        vocab_tokens = [
            "<unk>",
            "[CLS]",
            "[SEP]",
            "want",
            "unwanted",
            "wa",
            "un",
            "running",
            ",",
            "low",
            "l",
        ]
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))

    def get_tokenizer(self, **kwargs):
        kwargs["lower_case"] = True
        return TransfoXLTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = "<unk> UNwanted , running"
        output_text = "<unk> unwanted, running"
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = TransfoXLTokenizer(vocab_file=self.vocab_file, lower_case=True)

        tokens = tokenizer.tokenize("<unk> UNwanted , running")
        self.assertListEqual(tokens, ["<unk>", "unwanted", ",", "running"])

        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [0, 4, 8, 7])

    def test_full_tokenizer_lower(self):
        tokenizer = TransfoXLTokenizer(lower_case=True)

        self.assertListEqual(
            tokenizer.tokenize(" \tHeLLo ! how \n Are yoU ? "), ["hello", "!", "how", "are", "you", "?"]
        )

    def test_full_tokenizer_no_lower(self):
        tokenizer = TransfoXLTokenizer(lower_case=False)

        self.assertListEqual(
            tokenizer.tokenize(" \tHeLLo ! how \n Are yoU ? "), ["HeLLo", "!", "how", "Are", "yoU", "?"]
        )

    def test_full_tokenizer_moses_numbers(self):
        tokenizer = TransfoXLTokenizer(lower_case=False)
        text_in = "Hello (bracket) and side-scrolled [and] Henry's $5,000 with 3.34 m. What's up!?"
        tokens_out = [
            "Hello",
            "(",
            "bracket",
            ")",
            "and",
            "side",
            "@-@",
            "scrolled",
            "[",
            "and",
            "]",
            "Henry",
            "'s",
            "$",
            "5",
            "@,@",
            "000",
            "with",
            "3",
            "@.@",
            "34",
            "m",
            ".",
            "What",
            "'s",
            "up",
            "!",
            "?",
        ]

        self.assertListEqual(tokenizer.tokenize(text_in), tokens_out)

        self.assertEqual(tokenizer.convert_tokens_to_string(tokens_out), text_in)

    def test_move_added_token(self):
        tokenizer = self.get_tokenizer()
        original_len = len(tokenizer)

        tokenizer.add_tokens(["new1", "new2"])
        tokenizer.move_added_token("new1", 1)

        # Check that moved token is not copied (duplicate)
        self.assertEqual(len(tokenizer), original_len + 2)
        # Check that token is moved to specified id
        self.assertEqual(tokenizer.encode("new1"), [1])
        self.assertEqual(tokenizer.decode([1]), "new1")
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCAmelCase_ = logging.get_logger(__name__)
lowerCAmelCase_ = {}
class __A ( A_ ):
'''simple docstring'''
lowerCAmelCase : List[str] = "llama"
lowerCAmelCase : Tuple = ["past_key_values"]
def __init__( self : Any ,_snake_case : List[Any]=32_000 ,_snake_case : str=4_096 ,_snake_case : Dict=11_008 ,_snake_case : int=32 ,_snake_case : Tuple=32 ,_snake_case : Optional[int]=None ,_snake_case : List[Any]="silu" ,_snake_case : Union[str, Any]=2_048 ,_snake_case : List[str]=0.02 ,_snake_case : List[Any]=1e-6 ,_snake_case : Optional[Any]=True ,_snake_case : Any=0 ,_snake_case : Optional[int]=1 ,_snake_case : List[str]=2 ,_snake_case : Tuple=1 ,_snake_case : str=False ,_snake_case : Tuple=None ,**_snake_case : Tuple ,) -> Tuple:
"""simple docstring"""
lowercase__ : Tuple = vocab_size
lowercase__ : Optional[Any] = max_position_embeddings
lowercase__ : Any = hidden_size
lowercase__ : Dict = intermediate_size
lowercase__ : List[Any] = num_hidden_layers
lowercase__ : List[str] = num_attention_heads
# for backward compatibility
if num_key_value_heads is None:
lowercase__ : Union[str, Any] = num_attention_heads
lowercase__ : Any = num_key_value_heads
lowercase__ : Union[str, Any] = hidden_act
lowercase__ : Any = initializer_range
lowercase__ : str = rms_norm_eps
lowercase__ : List[Any] = pretraining_tp
lowercase__ : Any = use_cache
lowercase__ : Optional[int] = rope_scaling
self._rope_scaling_validation()
super().__init__(
pad_token_id=_snake_case ,bos_token_id=_snake_case ,eos_token_id=_snake_case ,tie_word_embeddings=_snake_case ,**_snake_case ,)
def UpperCAmelCase ( self : Union[str, Any] ) -> Optional[Any]:
"""simple docstring"""
if self.rope_scaling is None:
return
if not isinstance(self.rope_scaling ,_snake_case ) or len(self.rope_scaling ) != 2:
raise ValueError(
'''`rope_scaling` must be a dictionary with with two fields, `name` and `factor`, '''
f"""got {self.rope_scaling}""" )
lowercase__ : List[Any] = self.rope_scaling.get('''type''' ,_snake_case )
lowercase__ : Optional[Any] = self.rope_scaling.get('''factor''' ,_snake_case )
if rope_scaling_type is None or rope_scaling_type not in ["linear", "dynamic"]:
raise ValueError(
f"""`rope_scaling`'s name field must be one of ['linear', 'dynamic'], got {rope_scaling_type}""" )
if rope_scaling_factor is None or not isinstance(_snake_case ,_snake_case ) or rope_scaling_factor <= 1.0:
raise ValueError(f"""`rope_scaling`'s factor field must be an float > 1, got {rope_scaling_factor}""" )
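
# Hedged usage note (added; not part of the module): `rope_scaling` must be a dict
# with exactly the keys "type" (one of "linear"/"dynamic") and "factor" (a float > 1.0):
#
#     config = LlamaConfig(rope_scaling={"type": "linear", "factor": 2.0})  # ok
#     config = LlamaConfig(rope_scaling={"type": "cubic", "factor": 2.0})   # raises ValueError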
"""simple docstring"""
from typing import List, Optional, Tuple, Union
import torch
from torch import nn
from torch.nn import CrossEntropyLoss
from ... import AutoBackbone
from ...modeling_outputs import SemanticSegmenterOutput
from ...modeling_utils import PreTrainedModel
from ...utils import add_start_docstrings, add_start_docstrings_to_model_forward, replace_return_docstrings
from ...utils.backbone_utils import BackboneMixin
from .configuration_upernet import UperNetConfig
lowerCAmelCase_ = [
'openmmlab/upernet-convnext-tiny',
# See all UperNet models at https://huggingface.co/models?filter=upernet
]
# General docstring
lowerCAmelCase_ = 'UperNetConfig'
class __A ( nn.Module ):
'''simple docstring'''
def __init__( self : List[str] ,_snake_case : int ,_snake_case : int ,_snake_case : Union[int, Tuple[int, int]] ,_snake_case : Union[int, Tuple[int, int], str] = 0 ,_snake_case : bool = False ,_snake_case : Union[int, Tuple[int, int]] = 1 ,) -> None:
"""simple docstring"""
super().__init__()
lowercase__ : Optional[int] = nn.Convad(
in_channels=_snake_case ,out_channels=_snake_case ,kernel_size=_snake_case ,padding=_snake_case ,bias=_snake_case ,dilation=_snake_case ,)
lowercase__ : Tuple = nn.BatchNormad(_snake_case )
lowercase__ : List[str] = nn.ReLU()
def UpperCAmelCase ( self : str ,_snake_case : torch.Tensor ) -> torch.Tensor:
"""simple docstring"""
lowercase__ : Union[str, Any] = self.conv(_snake_case )
lowercase__ : List[str] = self.batch_norm(_snake_case )
lowercase__ : Tuple = self.activation(_snake_case )
return output
class __A ( nn.Module ):
'''simple docstring'''
def __init__( self : Union[str, Any] ,_snake_case : int ,_snake_case : int ,_snake_case : int ) -> None:
"""simple docstring"""
super().__init__()
lowercase__ : List[Any] = [
nn.AdaptiveAvgPoolad(_snake_case ),
UperNetConvModule(_snake_case ,_snake_case ,kernel_size=1 ),
]
for i, layer in enumerate(self.layers ):
self.add_module(str(_snake_case ) ,_snake_case )
def UpperCAmelCase ( self : Dict ,_snake_case : torch.Tensor ) -> torch.Tensor:
"""simple docstring"""
lowercase__ : Any = input
for layer in self.layers:
lowercase__ : int = layer(_snake_case )
return hidden_state
class __A ( nn.Module ):
'''simple docstring'''
def __init__( self : List[str] ,_snake_case : Tuple[int, ...] ,_snake_case : int ,_snake_case : int ,_snake_case : bool ) -> None:
"""simple docstring"""
super().__init__()
lowercase__ : int = pool_scales
lowercase__ : Dict = align_corners
lowercase__ : Optional[Any] = in_channels
lowercase__ : Optional[Any] = channels
lowercase__ : int = []
for i, pool_scale in enumerate(_snake_case ):
lowercase__ : Optional[Any] = UperNetPyramidPoolingBlock(pool_scale=_snake_case ,in_channels=_snake_case ,channels=_snake_case )
self.blocks.append(_snake_case )
self.add_module(str(_snake_case ) ,_snake_case )
def UpperCAmelCase ( self : Any ,_snake_case : torch.Tensor ) -> List[torch.Tensor]:
"""simple docstring"""
lowercase__ : int = []
for ppm in self.blocks:
lowercase__ : Any = ppm(_snake_case )
lowercase__ : int = nn.functional.interpolate(
_snake_case ,size=x.size()[2:] ,mode='''bilinear''' ,align_corners=self.align_corners )
ppm_outs.append(_snake_case )
return ppm_outs
class __A ( nn.Module ):
'''simple docstring'''
def __init__( self : Union[str, Any] ,_snake_case : List[str] ,_snake_case : Union[str, Any] ) -> str:
"""simple docstring"""
super().__init__()
lowercase__ : str = config
lowercase__ : Optional[Any] = config.pool_scales # e.g. (1, 2, 3, 6)
lowercase__ : Optional[Any] = in_channels
lowercase__ : Any = config.hidden_size
lowercase__ : Optional[Any] = False
lowercase__ : Optional[int] = nn.Convad(self.channels ,config.num_labels ,kernel_size=1 )
# PSP Module
lowercase__ : Dict = UperNetPyramidPoolingModule(
self.pool_scales ,self.in_channels[-1] ,self.channels ,align_corners=self.align_corners ,)
lowercase__ : str = UperNetConvModule(
self.in_channels[-1] + len(self.pool_scales ) * self.channels ,self.channels ,kernel_size=3 ,padding=1 ,)
# FPN Module
lowercase__ : Any = nn.ModuleList()
lowercase__ : Union[str, Any] = nn.ModuleList()
for in_channels in self.in_channels[:-1]: # skip the top layer
lowercase__ : List[Any] = UperNetConvModule(_snake_case ,self.channels ,kernel_size=1 )
lowercase__ : Optional[int] = UperNetConvModule(self.channels ,self.channels ,kernel_size=3 ,padding=1 )
self.lateral_convs.append(_snake_case )
self.fpn_convs.append(_snake_case )
lowercase__ : int = UperNetConvModule(
len(self.in_channels ) * self.channels ,self.channels ,kernel_size=3 ,padding=1 ,)
def UpperCAmelCase ( self : Dict ) -> Union[str, Any]:
"""simple docstring"""
self.apply(self._init_weights )
def UpperCAmelCase ( self : Union[str, Any] ,_snake_case : Optional[Any] ) -> List[str]:
"""simple docstring"""
if isinstance(_snake_case ,nn.Convad ):
module.weight.data.normal_(mean=0.0 ,std=self.config.initializer_range )
if module.bias is not None:
module.bias.data.zero_()
def UpperCAmelCase ( self : Optional[Any] ,_snake_case : Optional[Any] ) -> str:
"""simple docstring"""
lowercase__ : Dict = inputs[-1]
lowercase__ : Optional[int] = [x]
psp_outs.extend(self.psp_modules(_snake_case ) )
lowercase__ : Optional[Any] = torch.cat(_snake_case ,dim=1 )
lowercase__ : List[str] = self.bottleneck(_snake_case )
return output
def UpperCAmelCase ( self : List[str] ,_snake_case : torch.Tensor ) -> torch.Tensor:
"""simple docstring"""
lowercase__ : Tuple = [lateral_conv(encoder_hidden_states[i] ) for i, lateral_conv in enumerate(self.lateral_convs )]
laterals.append(self.psp_forward(_snake_case ) )
# build top-down path
lowercase__ : List[Any] = len(_snake_case )
for i in range(used_backbone_levels - 1 ,0 ,-1 ):
lowercase__ : Union[str, Any] = laterals[i - 1].shape[2:]
lowercase__ : int = laterals[i - 1] + nn.functional.interpolate(
laterals[i] ,size=_snake_case ,mode='''bilinear''' ,align_corners=self.align_corners )
# build outputs
lowercase__ : List[str] = [self.fpn_convs[i](laterals[i] ) for i in range(used_backbone_levels - 1 )]
# append psp feature
fpn_outs.append(laterals[-1] )
for i in range(used_backbone_levels - 1 ,0 ,-1 ):
lowercase__ : Any = nn.functional.interpolate(
fpn_outs[i] ,size=fpn_outs[0].shape[2:] ,mode='''bilinear''' ,align_corners=self.align_corners )
lowercase__ : Any = torch.cat(_snake_case ,dim=1 )
lowercase__ : Any = self.fpn_bottleneck(_snake_case )
lowercase__ : str = self.classifier(_snake_case )
return output
class __A ( nn.Module ):
'''simple docstring'''
def __init__( self : Dict ,_snake_case : List[Any] ,_snake_case : int = 2 ,_snake_case : int = 3 ,_snake_case : Union[int, Tuple[int, int]] = 1 ) -> None:
"""simple docstring"""
super().__init__()
lowercase__ : int = config
lowercase__ : Dict = config.auxiliary_in_channels
lowercase__ : Optional[int] = config.auxiliary_channels
lowercase__ : List[Any] = config.auxiliary_num_convs
lowercase__ : List[Any] = config.auxiliary_concat_input
lowercase__ : str = in_index
lowercase__ : Any = (kernel_size // 2) * dilation
lowercase__ : Optional[Any] = []
convs.append(
UperNetConvModule(
self.in_channels ,self.channels ,kernel_size=_snake_case ,padding=_snake_case ,dilation=_snake_case ) )
for i in range(self.num_convs - 1 ):
convs.append(
UperNetConvModule(
self.channels ,self.channels ,kernel_size=_snake_case ,padding=_snake_case ,dilation=_snake_case ) )
if self.num_convs == 0:
lowercase__ : List[str] = nn.Identity()
else:
lowercase__ : Dict = nn.Sequential(*_snake_case )
if self.concat_input:
lowercase__ : int = UperNetConvModule(
self.in_channels + self.channels ,self.channels ,kernel_size=_snake_case ,padding=kernel_size // 2 )
lowercase__ : List[str] = nn.Convad(self.channels ,config.num_labels ,kernel_size=1 )
def UpperCAmelCase ( self : Optional[Any] ) -> List[str]:
"""simple docstring"""
self.apply(self._init_weights )
def UpperCAmelCase ( self : List[Any] ,_snake_case : List[Any] ) -> Dict:
"""simple docstring"""
if isinstance(_snake_case ,nn.Convad ):
module.weight.data.normal_(mean=0.0 ,std=self.config.initializer_range )
if module.bias is not None:
module.bias.data.zero_()
def UpperCAmelCase ( self : List[str] ,_snake_case : torch.Tensor ) -> torch.Tensor:
"""simple docstring"""
lowercase__ : str = encoder_hidden_states[self.in_index]
lowercase__ : List[str] = self.convs(_snake_case )
if self.concat_input:
lowercase__ : Any = self.conv_cat(torch.cat([hidden_states, output] ,dim=1 ) )
lowercase__ : Dict = self.classifier(_snake_case )
return output
class __A ( A_ ):
'''simple docstring'''
lowerCAmelCase : Any = UperNetConfig
lowerCAmelCase : str = "pixel_values"
lowerCAmelCase : Dict = True
def UpperCAmelCase ( self : int ,_snake_case : str ) -> Optional[int]:
"""simple docstring"""
if isinstance(_snake_case ,_snake_case ):
module.backbone.init_weights()
module.decode_head.init_weights()
module.auxiliary_head.init_weights()
def UpperCAmelCase ( self : List[str] ) -> Dict:
"""simple docstring"""
self.backbone.init_weights()
self.decode_head.init_weights()
self.auxiliary_head.init_weights()
def UpperCAmelCase ( self : int ,_snake_case : str ,_snake_case : str=False ) -> List[str]:
"""simple docstring"""
if isinstance(_snake_case ,_snake_case ):
lowercase__ : List[Any] = value
lowerCAmelCase_ = R'\n Parameters:\n This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) sub-class. Use\n it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and\n behavior.\n config ([`UperNetConfig`]): Model configuration class with all the parameters of the model.\n Initializing with a config file does not load the weights associated with the model, only the\n configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.\n'
lowerCAmelCase_ = R'\n Args:\n pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):\n Pixel values. Padding will be ignored by default should you provide it. Pixel values can be obtained using\n [`AutoImageProcessor`]. See [`SegformerImageProcessor.__call__`] for details.\n output_attentions (`bool`, *optional*):\n Whether or not to return the attentions tensors of all attention layers in case the backbone has them. See\n `attentions` under returned tensors for more detail.\n output_hidden_states (`bool`, *optional*):\n Whether or not to return the hidden states of all layers of the backbone. See `hidden_states` under\n returned tensors for more detail.\n return_dict (`bool`, *optional*):\n Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.\n'
@add_start_docstrings(
"UperNet framework leveraging any vision backbone e.g. for ADE20k, CityScapes." ,A_ ,)
class __A ( A_ ):
'''simple docstring'''
def __init__( self : Optional[Any] ,_snake_case : Tuple ) -> int:
"""simple docstring"""
super().__init__(_snake_case )
lowercase__ : int = AutoBackbone.from_config(config.backbone_config )
# Semantic segmentation head(s)
lowercase__ : Any = UperNetHead(_snake_case ,in_channels=self.backbone.channels )
lowercase__ : str = UperNetFCNHead(_snake_case ) if config.use_auxiliary_head else None
# Initialize weights and apply final processing
self.post_init()
@add_start_docstrings_to_model_forward(UPERNET_INPUTS_DOCSTRING.format('''batch_size, sequence_length''' ) )
@replace_return_docstrings(output_type=_snake_case ,config_class=_CONFIG_FOR_DOC )
def UpperCAmelCase ( self : Dict ,_snake_case : Optional[torch.Tensor] = None ,_snake_case : Optional[bool] = None ,_snake_case : Optional[bool] = None ,_snake_case : Optional[torch.Tensor] = None ,_snake_case : Optional[bool] = None ,) -> Union[tuple, SemanticSegmenterOutput]:
"""simple docstring"""
lowercase__ : int = return_dict if return_dict is not None else self.config.use_return_dict
lowercase__ : Any = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
lowercase__ : Any = output_attentions if output_attentions is not None else self.config.output_attentions
lowercase__ : Optional[Any] = self.backbone.forward_with_filtered_kwargs(
_snake_case ,output_hidden_states=_snake_case ,output_attentions=_snake_case )
lowercase__ : Optional[int] = outputs.feature_maps
lowercase__ : Tuple = self.decode_head(_snake_case )
lowercase__ : Optional[int] = nn.functional.interpolate(_snake_case ,size=pixel_values.shape[2:] ,mode='''bilinear''' ,align_corners=_snake_case )
lowercase__ : List[str] = None
if self.auxiliary_head is not None:
lowercase__ : str = self.auxiliary_head(_snake_case )
lowercase__ : Dict = nn.functional.interpolate(
_snake_case ,size=pixel_values.shape[2:] ,mode='''bilinear''' ,align_corners=_snake_case )
lowercase__ : Any = None
if labels is not None:
if self.config.num_labels == 1:
raise ValueError('''The number of labels should be greater than one''' )
else:
# compute weighted loss
lowercase__ : Union[str, Any] = CrossEntropyLoss(ignore_index=self.config.loss_ignore_index )
lowercase__ : List[str] = loss_fct(_snake_case ,_snake_case )
lowercase__ : List[str] = loss_fct(_snake_case ,_snake_case )
lowercase__ : Optional[Any] = main_loss + self.config.auxiliary_loss_weight * auxiliary_loss
if not return_dict:
if output_hidden_states:
lowercase__ : Tuple = (logits,) + outputs[1:]
else:
lowercase__ : int = (logits,) + outputs[2:]
return ((loss,) + output) if loss is not None else output
return SemanticSegmenterOutput(
loss=_snake_case ,logits=_snake_case ,hidden_states=outputs.hidden_states ,attentions=outputs.attentions ,)
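
# Hedged inference sketch (added; not part of the module). The checkpoint id comes
# from UPERNET_PRETRAINED_MODEL_ARCHIVE_LIST above; the processor pairing is an assumption:
#
#     from transformers import AutoImageProcessor, UperNetForSemanticSegmentation
#     processor = AutoImageProcessor.from_pretrained("openmmlab/upernet-convnext-tiny")
#     model = UperNetForSemanticSegmentation.from_pretrained("openmmlab/upernet-convnext-tiny")
#     inputs = processor(images=image, return_tensors="pt")
#     logits = model(**inputs).logits  # (batch, num_labels, height, width)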
import requests
from bs4 import BeautifulSoup


def world_covid19_stats(url: str = "https://www.worldometers.info/coronavirus") -> dict:
    """Return a dict of current worldwide COVID-19 statistics scraped from worldometers."""
    soup = BeautifulSoup(requests.get(url).text, "html.parser")
    keys = soup.findAll("h1")
    values = soup.findAll("div", {"class": "maincounter-number"})
    keys += soup.findAll("span", {"class": "panel-title"})
    values += soup.findAll("div", {"class": "number-table-main"})
    return {key.text.strip(): value.text.strip() for key, value in zip(keys, values)}


if __name__ == "__main__":
    print("\033[1m" + "COVID-19 Status of the World" + "\033[0m\n")
    for key, value in world_covid19_stats().items():
        print(f"{key}\n{value}\n")
import importlib
import os
import sys


# This is required to make the module import works (when the python process is running from the root of the repo)
sys.path.append(".")


def get_module_path(test_file):
    """Turn a test file path into an importable module path."""
    components = test_file.split(os.path.sep)
    if components[0:2] != ["tests", "models"]:
        raise ValueError(
            "`test_file` should start with `tests/models/` (with `/` being the OS specific path separator). Got "
            f"{test_file} instead."
        )
    test_fn = components[-1]
    if not test_fn.endswith("py"):
        raise ValueError(f"`test_file` should be a python file. Got {test_fn} instead.")
    if not test_fn.startswith("test_modeling_"):
        raise ValueError(
            f"`test_file` should point to a file name of the form `test_modeling_*.py`. Got {test_fn} instead."
        )
    components = components[:-1] + [test_fn.replace(".py", "")]
    test_module_path = ".".join(components)
    return test_module_path


def get_test_module(test_file):
    """Import and return the module object for a model test file."""
    test_module_path = get_module_path(test_file)
    test_module = importlib.import_module(test_module_path)
    return test_module


def get_tester_classes(test_file):
    """Collect all classes in a test module whose names end with `ModelTester`."""
    tester_classes = []
    test_module = get_test_module(test_file)
    for attr in dir(test_module):
        if attr.endswith("ModelTester"):
            tester_classes.append(getattr(test_module, attr))
    # sort with class names
    return sorted(tester_classes, key=lambda x: x.__name__)


def get_test_classes(test_file):
    """Collect all test classes in a test module that define `all_model_classes`."""
    test_classes = []
    test_module = get_test_module(test_file)
    for attr in dir(test_module):
        attr_value = getattr(test_module, attr)
        # (TF/Flax)ModelTesterMixin is also an attribute in specific model test module. Let's exclude them by checking
        # `all_model_classes` is not empty (which also excludes other special classes).
        model_classes = getattr(attr_value, "all_model_classes", [])
        if len(model_classes) > 0:
            test_classes.append(attr_value)
    # sort with class names
    return sorted(test_classes, key=lambda x: x.__name__)


def get_model_classes(test_file):
    """Collect all model classes covered by the test classes in a test module."""
    test_classes = get_test_classes(test_file)
    model_classes = set()
    for test_class in test_classes:
        model_classes.update(test_class.all_model_classes)
    # sort with class names
    return sorted(model_classes, key=lambda x: x.__name__)


def get_model_tester_from_test_class(test_class):
    """Return the model tester class used by a test class (if any)."""
    test = test_class()
    if hasattr(test, "setUp"):
        test.setUp()
    model_tester = None
    if hasattr(test, "model_tester"):
        # `(TF/Flax)ModelTesterMixin` has this attribute default to `None`. Let's skip this case.
        if test.model_tester is not None:
            model_tester = test.model_tester.__class__
    return model_tester


def get_test_classes_for_model(test_file, model_class):
    """Return all test classes in a test module that cover `model_class`."""
    test_classes = get_test_classes(test_file)
    target_test_classes = []
    for test_class in test_classes:
        if model_class in test_class.all_model_classes:
            target_test_classes.append(test_class)
    # sort with class names
    return sorted(target_test_classes, key=lambda x: x.__name__)


def get_tester_classes_for_model(test_file, model_class):
    """Return all model tester classes in a test module that cover `model_class`."""
    test_classes = get_test_classes_for_model(test_file, model_class)
    tester_classes = []
    for test_class in test_classes:
        tester_class = get_model_tester_from_test_class(test_class)
        if tester_class is not None:
            tester_classes.append(tester_class)
    # sort with class names
    return sorted(tester_classes, key=lambda x: x.__name__)


def get_test_to_tester_mapping(test_file):
    """Map each test class in a test module to its model tester class."""
    test_classes = get_test_classes(test_file)
    test_tester_mapping = {test_class: get_model_tester_from_test_class(test_class) for test_class in test_classes}
    return test_tester_mapping


def get_model_to_test_mapping(test_file):
    """Map each model class in a test module to the test classes covering it."""
    model_classes = get_model_classes(test_file)
    model_test_mapping = {
        model_class: get_test_classes_for_model(test_file, model_class) for model_class in model_classes
    }
    return model_test_mapping


def get_model_to_tester_mapping(test_file):
    """Map each model class in a test module to the model tester classes covering it."""
    model_classes = get_model_classes(test_file)
    model_to_tester_mapping = {
        model_class: get_tester_classes_for_model(test_file, model_class) for model_class in model_classes
    }
    return model_to_tester_mapping


def to_json(o):
    """Recursively convert classes in (nested) containers to their names, for readable output."""
    if isinstance(o, str):
        return o
    elif isinstance(o, type):
        return o.__name__
    elif isinstance(o, (list, tuple)):
        return [to_json(x) for x in o]
    elif isinstance(o, dict):
        return {to_json(k): to_json(v) for k, v in o.items()}
    else:
        return o
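
# Hedged usage sketch (added; not part of the module; the test file path is just an example):
#
#     bert_test = "tests/models/bert/test_modeling_bert.py"
#     print(to_json(get_test_to_tester_mapping(bert_test)))
#     print(to_json(get_model_to_test_mapping(bert_test)))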
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_tf_available,
    is_tokenizers_available,
    is_torch_available,
)


_import_structure = {
    "configuration_convbert": ["CONVBERT_PRETRAINED_CONFIG_ARCHIVE_MAP", "ConvBertConfig", "ConvBertOnnxConfig"],
    "tokenization_convbert": ["ConvBertTokenizer"],
}

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_convbert_fast"] = ["ConvBertTokenizerFast"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_convbert"] = [
        "CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "ConvBertForMaskedLM",
        "ConvBertForMultipleChoice",
        "ConvBertForQuestionAnswering",
        "ConvBertForSequenceClassification",
        "ConvBertForTokenClassification",
        "ConvBertLayer",
        "ConvBertModel",
        "ConvBertPreTrainedModel",
        "load_tf_weights_in_convbert",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_convbert"] = [
        "TF_CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFConvBertForMaskedLM",
        "TFConvBertForMultipleChoice",
        "TFConvBertForQuestionAnswering",
        "TFConvBertForSequenceClassification",
        "TFConvBertForTokenClassification",
        "TFConvBertLayer",
        "TFConvBertModel",
        "TFConvBertPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_convbert import CONVBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, ConvBertConfig, ConvBertOnnxConfig
    from .tokenization_convbert import ConvBertTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_convbert_fast import ConvBertTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_convbert import (
            CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
            ConvBertForMaskedLM,
            ConvBertForMultipleChoice,
            ConvBertForQuestionAnswering,
            ConvBertForSequenceClassification,
            ConvBertForTokenClassification,
            ConvBertLayer,
            ConvBertModel,
            ConvBertPreTrainedModel,
            load_tf_weights_in_convbert,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_convbert import (
            TF_CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFConvBertForMaskedLM,
            TFConvBertForMultipleChoice,
            TFConvBertForQuestionAnswering,
            TFConvBertForSequenceClassification,
            TFConvBertForTokenClassification,
            TFConvBertLayer,
            TFConvBertModel,
            TFConvBertPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
def dodecahedron_surface_area(edge: float) -> float:
    """
    Calculates the surface area of a regular dodecahedron:
    a = 3 * sqrt(25 + 10 * sqrt(5)) * (edge ** 2)
    """
    if edge <= 0 or not isinstance(edge, (int, float)):
        raise ValueError("Length must be a positive number.")
    return 3 * ((25 + 10 * (5 ** (1 / 2))) ** (1 / 2)) * (edge**2)


def dodecahedron_volume(edge: float) -> float:
    """
    Calculates the volume of a regular dodecahedron:
    v = ((15 + 7 * sqrt(5)) / 4) * (edge ** 3)
    """
    if edge <= 0 or not isinstance(edge, (int, float)):
        raise ValueError("Length must be a positive number.")
    return ((15 + (7 * (5 ** (1 / 2)))) / 4) * (edge**3)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
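
    # Worked example (added for illustration) for a unit edge:
    # surface area = 3 * sqrt(25 + 10 * sqrt(5)) ≈ 20.6457
    # volume = (15 + 7 * sqrt(5)) / 4 ≈ 7.6631
    print(dodecahedron_surface_area(1))
    print(dodecahedron_volume(1))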
from typing import Optional, Union

import torch
from torch import nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss

from ...activations import ACT2FN
from ...modeling_outputs import BaseModelOutputWithPoolingAndNoAttention, ImageClassifierOutputWithNoAttention
from ...modeling_utils import PreTrainedModel
from ...utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, logging
from .configuration_mobilenet_v1 import MobileNetV1Config


logger = logging.get_logger(__name__)


# General docstring
_CONFIG_FOR_DOC = "MobileNetV1Config"

# Base docstring
_CHECKPOINT_FOR_DOC = "google/mobilenet_v1_1.0_224"
_EXPECTED_OUTPUT_SHAPE = [1, 1024, 7, 7]

# Image classification docstring
_IMAGE_CLASS_CHECKPOINT = "google/mobilenet_v1_1.0_224"
_IMAGE_CLASS_EXPECTED_OUTPUT = "tabby, tabby cat"


MOBILENET_V1_PRETRAINED_MODEL_ARCHIVE_LIST = [
    "google/mobilenet_v1_1.0_224",
    "google/mobilenet_v1_0.75_192",
    # See all MobileNetV1 models at https://huggingface.co/models?filter=mobilenet_v1
]


def _build_tf_to_pytorch_map(model, config, tf_weights=None):
    """
    A map of modules from TF to PyTorch.
    """

    tf_to_pt_map = {}

    if isinstance(model, MobileNetV1ForImageClassification):
        backbone = model.mobilenet_v1
    else:
        backbone = model

    prefix = "MobilenetV1/Conv2d_0/"
    tf_to_pt_map[prefix + "weights"] = backbone.conv_stem.convolution.weight
    tf_to_pt_map[prefix + "BatchNorm/beta"] = backbone.conv_stem.normalization.bias
    tf_to_pt_map[prefix + "BatchNorm/gamma"] = backbone.conv_stem.normalization.weight
    tf_to_pt_map[prefix + "BatchNorm/moving_mean"] = backbone.conv_stem.normalization.running_mean
    tf_to_pt_map[prefix + "BatchNorm/moving_variance"] = backbone.conv_stem.normalization.running_var

    for i in range(13):
        tf_index = i + 1
        pt_index = i * 2

        pointer = backbone.layer[pt_index]
        prefix = f"MobilenetV1/Conv2d_{tf_index}_depthwise/"
        tf_to_pt_map[prefix + "depthwise_weights"] = pointer.convolution.weight
        tf_to_pt_map[prefix + "BatchNorm/beta"] = pointer.normalization.bias
        tf_to_pt_map[prefix + "BatchNorm/gamma"] = pointer.normalization.weight
        tf_to_pt_map[prefix + "BatchNorm/moving_mean"] = pointer.normalization.running_mean
        tf_to_pt_map[prefix + "BatchNorm/moving_variance"] = pointer.normalization.running_var

        pointer = backbone.layer[pt_index + 1]
        prefix = f"MobilenetV1/Conv2d_{tf_index}_pointwise/"
        tf_to_pt_map[prefix + "weights"] = pointer.convolution.weight
        tf_to_pt_map[prefix + "BatchNorm/beta"] = pointer.normalization.bias
        tf_to_pt_map[prefix + "BatchNorm/gamma"] = pointer.normalization.weight
        tf_to_pt_map[prefix + "BatchNorm/moving_mean"] = pointer.normalization.running_mean
        tf_to_pt_map[prefix + "BatchNorm/moving_variance"] = pointer.normalization.running_var

    if isinstance(model, MobileNetV1ForImageClassification):
        prefix = "MobilenetV1/Logits/Conv2d_1c_1x1/"
        tf_to_pt_map[prefix + "weights"] = model.classifier.weight
        tf_to_pt_map[prefix + "biases"] = model.classifier.bias

    return tf_to_pt_map


def load_tf_weights_in_mobilenet_v1(model, config, tf_checkpoint_path):
    """Load TensorFlow checkpoints in a PyTorch model."""
    try:
        import numpy as np
        import tensorflow as tf
    except ImportError:
        logger.error(
            "Loading a TensorFlow models in PyTorch, requires TensorFlow to be installed. Please see "
            "https://www.tensorflow.org/install/ for installation instructions."
        )
        raise

    # Load weights from TF model
    init_vars = tf.train.list_variables(tf_checkpoint_path)
    tf_weights = {}
    for name, shape in init_vars:
        logger.info(f"Loading TF weight {name} with shape {shape}")
        array = tf.train.load_variable(tf_checkpoint_path, name)
        tf_weights[name] = array

    # Build TF to PyTorch weights loading map
    tf_to_pt_map = _build_tf_to_pytorch_map(model, config, tf_weights)

    for name, pointer in tf_to_pt_map.items():
        logger.info(f"Importing {name}")
        if name not in tf_weights:
            logger.info(f"{name} not in tf pre-trained weights, skipping")
            continue

        array = tf_weights[name]

        if "depthwise_weights" in name:
            logger.info("Transposing depthwise")
            array = np.transpose(array, (2, 3, 0, 1))
        elif "weights" in name:
            logger.info("Transposing")
            if len(pointer.shape) == 2:  # copying into linear layer
                array = array.squeeze().transpose()
            else:
                array = np.transpose(array, (3, 2, 0, 1))

        if pointer.shape != array.shape:
            raise ValueError(f"Pointer shape {pointer.shape} and array shape {array.shape} mismatched")

        logger.info(f"Initialize PyTorch weight {name} {array.shape}")
        pointer.data = torch.from_numpy(array)

        tf_weights.pop(name, None)
        tf_weights.pop(name + "/RMSProp", None)
        tf_weights.pop(name + "/RMSProp_1", None)
        tf_weights.pop(name + "/ExponentialMovingAverage", None)

    logger.info(f"Weights not copied to PyTorch model: {', '.join(tf_weights.keys())}")
    return model


def apply_tf_padding(features: torch.Tensor, conv_layer: nn.Conv2d) -> torch.Tensor:
    """
    Apply TensorFlow-style "SAME" padding to a convolution layer. See the notes at:
    https://www.tensorflow.org/api_docs/python/tf/nn#notes_on_padding_2
    """
    in_height, in_width = features.shape[-2:]
    stride_height, stride_width = conv_layer.stride
    kernel_height, kernel_width = conv_layer.kernel_size

    if in_height % stride_height == 0:
        pad_along_height = max(kernel_height - stride_height, 0)
    else:
        pad_along_height = max(kernel_height - (in_height % stride_height), 0)

    if in_width % stride_width == 0:
        pad_along_width = max(kernel_width - stride_width, 0)
    else:
        pad_along_width = max(kernel_width - (in_width % stride_width), 0)

    pad_left = pad_along_width // 2
    pad_right = pad_along_width - pad_left
    pad_top = pad_along_height // 2
    pad_bottom = pad_along_height - pad_top

    padding = (pad_left, pad_right, pad_top, pad_bottom)
    return nn.functional.pad(features, padding, "constant", 0.0)


class MobileNetV1ConvLayer(nn.Module):
    def __init__(
        self,
        config: MobileNetV1Config,
        in_channels: int,
        out_channels: int,
        kernel_size: int,
        stride: Optional[int] = 1,
        groups: Optional[int] = 1,
        bias: bool = False,
        use_normalization: Optional[bool] = True,
        use_activation: Optional[bool or str] = True,
    ) -> None:
        super().__init__()
        self.config = config

        if in_channels % groups != 0:
            raise ValueError(f"Input channels ({in_channels}) are not divisible by {groups} groups.")
        if out_channels % groups != 0:
            raise ValueError(f"Output channels ({out_channels}) are not divisible by {groups} groups.")

        padding = 0 if config.tf_padding else int((kernel_size - 1) / 2)

        self.convolution = nn.Conv2d(
            in_channels=in_channels,
            out_channels=out_channels,
            kernel_size=kernel_size,
            stride=stride,
            padding=padding,
            groups=groups,
            bias=bias,
            padding_mode="zeros",
        )

        if use_normalization:
            self.normalization = nn.BatchNorm2d(
                num_features=out_channels,
                eps=config.layer_norm_eps,
                momentum=0.9997,
                affine=True,
                track_running_stats=True,
            )
        else:
            self.normalization = None

        if use_activation:
            if isinstance(use_activation, str):
                self.activation = ACT2FN[use_activation]
            elif isinstance(config.hidden_act, str):
                self.activation = ACT2FN[config.hidden_act]
            else:
                self.activation = config.hidden_act
        else:
            self.activation = None

    def forward(self, features: torch.Tensor) -> torch.Tensor:
        if self.config.tf_padding:
            features = apply_tf_padding(features, self.convolution)
        features = self.convolution(features)
        if self.normalization is not None:
            features = self.normalization(features)
        if self.activation is not None:
            features = self.activation(features)
        return features


class MobileNetV1PreTrainedModel(PreTrainedModel):
    config_class = MobileNetV1Config
    load_tf_weights = load_tf_weights_in_mobilenet_v1
    base_model_prefix = "mobilenet_v1"
    main_input_name = "pixel_values"
    supports_gradient_checkpointing = False

    def _init_weights(self, module: Union[nn.Linear, nn.Conv2d]) -> None:
        if isinstance(module, (nn.Linear, nn.Conv2d)):
            module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
            if module.bias is not None:
                module.bias.data.zero_()
        elif isinstance(module, nn.BatchNorm2d):
            module.bias.data.zero_()
            module.weight.data.fill_(1.0)


MOBILENET_V1_START_DOCSTRING = r"""
    This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it
    as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and
    behavior.

    Parameters:
        config ([`MobileNetV1Config`]): Model configuration class with all the parameters of the model.
            Initializing with a config file does not load the weights associated with the model, only the
            configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
"""

MOBILENET_V1_INPUTS_DOCSTRING = r"""
    Args:
        pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
            Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See
            [`MobileNetV1ImageProcessor.__call__`] for details.
        output_hidden_states (`bool`, *optional*):
            Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
            more detail.
        return_dict (`bool`, *optional*):
            Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
"""


@add_start_docstrings(
    "The bare MobileNetV1 model outputting raw hidden-states without any specific head on top.",
    MOBILENET_V1_START_DOCSTRING,
)
class MobileNetV1Model(MobileNetV1PreTrainedModel):
    def __init__(self, config: MobileNetV1Config, add_pooling_layer: bool = True):
        super().__init__(config)
        self.config = config

        depth = 32
        out_channels = max(int(depth * config.depth_multiplier), config.min_depth)

        self.conv_stem = MobileNetV1ConvLayer(
            config,
            in_channels=config.num_channels,
            out_channels=out_channels,
            kernel_size=3,
            stride=2,
        )

        strides = [1, 2, 1, 2, 1, 2, 1, 1, 1, 1, 1, 2, 1]

        self.layer = nn.ModuleList()
        for i in range(13):
            in_channels = out_channels

            if strides[i] == 2 or i == 0:
                depth *= 2
                out_channels = max(int(depth * config.depth_multiplier), config.min_depth)

            # depthwise 3x3 convolution followed by a pointwise 1x1 convolution
            self.layer.append(
                MobileNetV1ConvLayer(
                    config,
                    in_channels=in_channels,
                    out_channels=in_channels,
                    kernel_size=3,
                    stride=strides[i],
                    groups=in_channels,
                )
            )
            self.layer.append(
                MobileNetV1ConvLayer(
                    config,
                    in_channels=in_channels,
                    out_channels=out_channels,
                    kernel_size=1,
                )
            )

        self.pooler = nn.AdaptiveAvgPool2d((1, 1)) if add_pooling_layer else None

        # Initialize weights and apply final processing
        self.post_init()

    def _prune_heads(self, heads_to_prune):
        raise NotImplementedError

    @add_start_docstrings_to_model_forward(MOBILENET_V1_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
        checkpoint=_CHECKPOINT_FOR_DOC,
        output_type=BaseModelOutputWithPoolingAndNoAttention,
        config_class=_CONFIG_FOR_DOC,
        modality="vision",
        expected_output=_EXPECTED_OUTPUT_SHAPE,
    )
    def forward(
        self,
        pixel_values: Optional[torch.Tensor] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[tuple, BaseModelOutputWithPoolingAndNoAttention]:
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        if pixel_values is None:
            raise ValueError("You have to specify pixel_values")

        hidden_states = self.conv_stem(pixel_values)

        all_hidden_states = () if output_hidden_states else None

        for i, layer_module in enumerate(self.layer):
            hidden_states = layer_module(hidden_states)

            if output_hidden_states:
                all_hidden_states = all_hidden_states + (hidden_states,)

        last_hidden_state = hidden_states

        if self.pooler is not None:
            pooled_output = torch.flatten(self.pooler(last_hidden_state), start_dim=1)
        else:
            pooled_output = None

        if not return_dict:
            return tuple(v for v in [last_hidden_state, pooled_output, all_hidden_states] if v is not None)

        return BaseModelOutputWithPoolingAndNoAttention(
            last_hidden_state=last_hidden_state,
            pooler_output=pooled_output,
            hidden_states=all_hidden_states,
        )


@add_start_docstrings(
    """
    MobileNetV1 model with an image classification head on top (a linear layer on top of the pooled features), e.g. for
    ImageNet.
    """,
    MOBILENET_V1_START_DOCSTRING,
)
class MobileNetV1ForImageClassification(MobileNetV1PreTrainedModel):
    def __init__(self, config: MobileNetV1Config) -> None:
        super().__init__(config)

        self.num_labels = config.num_labels
        self.mobilenet_v1 = MobileNetV1Model(config)

        last_hidden_size = self.mobilenet_v1.layer[-1].convolution.out_channels

        # Classifier head
        self.dropout = nn.Dropout(config.classifier_dropout_prob, inplace=True)
        self.classifier = nn.Linear(last_hidden_size, config.num_labels) if config.num_labels > 0 else nn.Identity()

        # Initialize weights and apply final processing
        self.post_init()

    @add_start_docstrings_to_model_forward(MOBILENET_V1_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
        checkpoint=_IMAGE_CLASS_CHECKPOINT,
        output_type=ImageClassifierOutputWithNoAttention,
        config_class=_CONFIG_FOR_DOC,
        expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT,
    )
    def forward(
        self,
        pixel_values: Optional[torch.Tensor] = None,
        output_hidden_states: Optional[bool] = None,
        labels: Optional[torch.Tensor] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[tuple, ImageClassifierOutputWithNoAttention]:
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        outputs = self.mobilenet_v1(pixel_values, output_hidden_states=output_hidden_states, return_dict=return_dict)

        pooled_output = outputs.pooler_output if return_dict else outputs[1]

        logits = self.classifier(self.dropout(pooled_output))

        loss = None
        if labels is not None:
            # Infer the problem type once from the label dtype and number of labels
            if self.config.problem_type is None:
                if self.num_labels == 1:
                    self.config.problem_type = "regression"
                elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
                    self.config.problem_type = "single_label_classification"
                else:
                    self.config.problem_type = "multi_label_classification"

            if self.config.problem_type == "regression":
                loss_fct = MSELoss()
                if self.num_labels == 1:
                    loss = loss_fct(logits.squeeze(), labels.squeeze())
                else:
                    loss = loss_fct(logits, labels)
            elif self.config.problem_type == "single_label_classification":
                loss_fct = CrossEntropyLoss()
                loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
            elif self.config.problem_type == "multi_label_classification":
                loss_fct = BCEWithLogitsLoss()
                loss = loss_fct(logits, labels)

        if not return_dict:
            output = (logits,) + outputs[2:]
            return ((loss,) + output) if loss is not None else output

        return ImageClassifierOutputWithNoAttention(
            loss=loss,
            logits=logits,
            hidden_states=outputs.hidden_states,
        )
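
# Hedged inference sketch (added; not part of the module). The checkpoint comes
# from _IMAGE_CLASS_CHECKPOINT above; the processor pairing is an assumption:
#
#     from transformers import AutoImageProcessor, MobileNetV1ForImageClassification
#     processor = AutoImageProcessor.from_pretrained("google/mobilenet_v1_1.0_224")
#     model = MobileNetV1ForImageClassification.from_pretrained("google/mobilenet_v1_1.0_224")
#     logits = model(**processor(images=image, return_tensors="pt")).logits
#     print(model.config.id2label[logits.argmax(-1).item()])  # e.g. "tabby, tabby cat"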
| 87 | import warnings
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class snake_case_ ( __A ):
__A : Optional[Any] = ["image_processor", "tokenizer"]
__A : Tuple = "LayoutLMv3ImageProcessor"
__A : List[Any] = ("LayoutLMv3Tokenizer", "LayoutLMv3TokenizerFast")
def __init__( self : Union[str, Any] , lowercase_ : int=None , lowercase_ : str=None , **lowercase_ : Optional[Any] ) -> Optional[int]:
lowercase__ : Union[str, Any] = None
if "feature_extractor" in kwargs:
warnings.warn(
"The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
" instead." , lowercase_ , )
lowercase__ : Optional[int] = kwargs.pop("feature_extractor" )
lowercase__ : int = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError("You need to specify an `image_processor`." )
if tokenizer is None:
raise ValueError("You need to specify a `tokenizer`." )
super().__init__(lowercase_ , lowercase_ )
def __call__( self : Dict , lowercase_ : Optional[Any] , lowercase_ : Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None , lowercase_ : Optional[Union[PreTokenizedInput, List[PreTokenizedInput]]] = None , lowercase_ : Union[List[List[int]], List[List[List[int]]]] = None , lowercase_ : Optional[Union[List[int], List[List[int]]]] = None , lowercase_ : bool = True , lowercase_ : Union[bool, str, PaddingStrategy] = False , lowercase_ : Union[bool, str, TruncationStrategy] = None , lowercase_ : Optional[int] = None , lowercase_ : int = 0 , lowercase_ : Optional[int] = None , lowercase_ : Optional[bool] = None , lowercase_ : Optional[bool] = None , lowercase_ : bool = False , lowercase_ : bool = False , lowercase_ : bool = False , lowercase_ : bool = False , lowercase_ : bool = True , lowercase_ : Optional[Union[str, TensorType]] = None , **lowercase_ : Dict , ) -> BatchEncoding:
# verify input
if self.image_processor.apply_ocr and (boxes is not None):
raise ValueError(
"You cannot provide bounding boxes if you initialized the image processor with apply_ocr set to True." )
if self.image_processor.apply_ocr and (word_labels is not None):
raise ValueError(
"You cannot provide word labels if you initialized the image processor with apply_ocr set to True." )
# first, apply the image processor
lowercase__ : Union[str, Any] = self.image_processor(images=lowercase_ , return_tensors=lowercase_ )
# second, apply the tokenizer
if text is not None and self.image_processor.apply_ocr and text_pair is None:
if isinstance(lowercase_ , lowercase_ ):
lowercase__ : Optional[Any] = [text] # add batch dimension (as the image processor always adds a batch dimension)
lowercase__ : Any = features["words"]
lowercase__ : Tuple = self.tokenizer(
text=text if text is not None else features["words"] , text_pair=text_pair if text_pair is not None else None , boxes=boxes if boxes is not None else features["boxes"] , word_labels=lowercase_ , add_special_tokens=lowercase_ , padding=lowercase_ , truncation=lowercase_ , max_length=lowercase_ , stride=lowercase_ , pad_to_multiple_of=lowercase_ , return_token_type_ids=lowercase_ , return_attention_mask=lowercase_ , return_overflowing_tokens=lowercase_ , return_special_tokens_mask=lowercase_ , return_offsets_mapping=lowercase_ , return_length=lowercase_ , verbose=lowercase_ , return_tensors=lowercase_ , **lowercase_ , )
# add pixel values
        images = features.pop("pixel_values")
        if return_overflowing_tokens is True:
            images = self.get_overflowing_images(images, encoded_inputs["overflow_to_sample_mapping"])
        encoded_inputs["pixel_values"] = images
        return encoded_inputs
    def get_overflowing_images(self, images, overflow_to_sample_mapping):
        # in case there's an overflow, ensure each `input_ids` sample is mapped to its corresponding image
        images_with_overflow = []
        for sample_idx in overflow_to_sample_mapping:
            images_with_overflow.append(images[sample_idx])
        if len(images_with_overflow) != len(overflow_to_sample_mapping):
            raise ValueError(
                "Expected length of images to be the same as the length of `overflow_to_sample_mapping`, but got"
                f" {len(images_with_overflow)} and {len(overflow_to_sample_mapping)}")
        return images_with_overflow
    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)
    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)
    @property
    def model_input_names(self):
        return ["input_ids", "bbox", "attention_mask", "pixel_values"]
    @property
    def feature_extractor_class(self):
        warnings.warn(
            "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.", FutureWarning, )
        return self.image_processor_class
    @property
    def feature_extractor(self):
        warnings.warn(
            "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.", FutureWarning, )
        return self.image_processor
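# Hedged usage sketch (checkpoint name and image path are placeholders;
# apply_ocr=True in the default image processor requires Tesseract):
def _demo_processor(image_path="invoice.png"):
    from PIL import Image
    processor = LayoutLMv3Processor.from_pretrained("microsoft/layoutlmv3-base")
    image = Image.open(image_path).convert("RGB")
    return processor(image, return_tensors="pt")  # input_ids, attention_mask, bbox, pixel_values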
| 87 | 1 |
'''simple docstring'''
import argparse
import os
import re
import packaging.version
PATH_TO_EXAMPLES = 'examples/'
REPLACE_PATTERNS = {
    'examples': (re.compile(R'^check_min_version\("[^"]+"\)\s*$', re.MULTILINE), 'check_min_version("VERSION")\n'),
    'init': (re.compile(R'^__version__\s+=\s+"([^"]+)"\s*$', re.MULTILINE), '__version__ = "VERSION"\n'),
    'setup': (re.compile(R'^(\s*)version\s*=\s*"[^"]+",', re.MULTILINE), R'\1version="VERSION",'),
    'doc': (re.compile(R'^(\s*)release\s*=\s*"[^"]+"$', re.MULTILINE), 'release = "VERSION"\n'),
}
REPLACE_FILES = {
    'init': 'src/diffusers/__init__.py',
    'setup': 'setup.py',
}
README_FILE = 'README.md'
def update_version_in_file(fname, version, pattern):
    with open(fname, "r", encoding="utf-8", newline="\n") as f:
        code = f.read()
    re_pattern, replace = REPLACE_PATTERNS[pattern]
    replace = replace.replace("VERSION", version)
    code = re_pattern.sub(replace, code)
    with open(fname, "w", encoding="utf-8", newline="\n") as f:
        f.write(code)
def update_version_in_examples(version):
    for folder, directories, fnames in os.walk(PATH_TO_EXAMPLES):
        # Removing some of the folders with non-actively maintained examples from the walk
        if "research_projects" in directories:
            directories.remove("research_projects")
        if "legacy" in directories:
            directories.remove("legacy")
        for fname in fnames:
            if fname.endswith(".py"):
                update_version_in_file(os.path.join(folder, fname), version, pattern="examples")
def global_version_update(version, patch=False):
    for pattern, fname in REPLACE_FILES.items():
        update_version_in_file(fname, version, pattern)
    if not patch:
        update_version_in_examples(version)
def clean_main_ref_in_model_list():
    _start_prompt = "🤗 Transformers currently provides the following architectures"
    _end_prompt = "1. Want to contribute a new model?"
    with open(README_FILE, "r", encoding="utf-8", newline="\n") as f:
        lines = f.readlines()
    # Find the start of the list.
    start_index = 0
    while not lines[start_index].startswith(_start_prompt):
        start_index += 1
    start_index += 1
    index = start_index
    # Update the lines in the model list.
    while not lines[index].startswith(_end_prompt):
        if lines[index].startswith("1."):
            lines[index] = lines[index].replace(
                "https://huggingface.co/docs/diffusers/main/model_doc", "https://huggingface.co/docs/diffusers/model_doc", )
        index += 1
    with open(README_FILE, "w", encoding="utf-8", newline="\n") as f:
        f.writelines(lines)
def get_version():
    with open(REPLACE_FILES["init"], "r") as f:
        code = f.read()
    default_version = REPLACE_PATTERNS["init"][0].search(code).groups()[0]
    return packaging.version.parse(default_version)
def pre_release_work(patch=False):
    default_version = get_version()
    if patch and default_version.is_devrelease:
        raise ValueError("Can't create a patch version from the dev branch, checkout a released version!")
    if default_version.is_devrelease:
        default_version = default_version.base_version
    elif patch:
        default_version = f"{default_version.major}.{default_version.minor}.{default_version.micro + 1}"
    else:
        default_version = f"{default_version.major}.{default_version.minor + 1}.0"
    # Now let's ask nicely if that's the right one.
    version = input(f"Which version are you releasing? [{default_version}]")
    if len(version) == 0:
        version = default_version
    print(f"Updating version to {version}.")
    global_version_update(version, patch=patch)
def post_release_work():
    current_version = get_version()
    dev_version = f"{current_version.major}.{current_version.minor + 1}.0.dev0"
    current_version = current_version.base_version
    # Check with the user we got that right.
    version = input(f"Which version are we developing now? [{dev_version}]")
    if len(version) == 0:
        version = dev_version
    print(f"Updating version to {version}.")
    global_version_update(version)
    # print("Cleaning main README, don't forget to run `make fix-copies`.")
    # clean_main_ref_in_model_list()
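# For reference, the version arithmetic in pre_release_work/post_release_work
# behaves like this standalone helper (illustrative only, not used by the
# script; the example version string is an assumption):
def _demo_next_versions(current="0.21.0.dev0"):
    v = packaging.version.parse(current)
    release = v.base_version if v.is_devrelease else f"{v.major}.{v.minor + 1}.0"
    r = packaging.version.parse(release)
    return release, f"{r.major}.{r.minor + 1}.0.dev0"  # ("0.21.0", "0.22.0.dev0")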
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument('--post_release', action='store_true', help='Whether this is pre or post release.')
    parser.add_argument('--patch', action='store_true', help='Whether or not this is a patch release.')
    args = parser.parse_args()
if not args.post_release:
pre_release_work(patch=args.patch)
elif args.patch:
print('Nothing to do after a patch :-)')
else:
post_release_work()
| 370 |
'''simple docstring'''
import logging
from pathlib import Path
import numpy as np
import pytorch_lightning as pl
import torch
from pytorch_lightning.callbacks import EarlyStopping, ModelCheckpoint
from pytorch_lightning.utilities import rank_zero_only
from utils_rag import save_json
def count_trainable_parameters(model):
    model_parameters = filter(lambda p: p.requires_grad, model.parameters())
    params = sum([np.prod(p.size()) for p in model_parameters])
    return params
logger = logging.getLogger(__name__)
def get_checkpoint_callback(output_dir, metric):
    if metric == "rouge2":
        exp = "{val_avg_rouge2:.4f}-{step_count}"
    elif metric == "bleu":
        exp = "{val_avg_bleu:.4f}-{step_count}"
    elif metric == "em":
        exp = "{val_avg_em:.4f}-{step_count}"
    elif metric == "loss":
        exp = "{val_avg_loss:.4f}-{step_count}"
    else:
        raise NotImplementedError(
            f"seq2seq callbacks only support rouge2 and bleu, got {metric}, You can make your own by adding to this"
            " function.")
    checkpoint_callback = ModelCheckpoint(
        dirpath=output_dir, filename=exp, monitor=f"val_{metric}", mode="max", save_top_k=1, every_n_epochs=1, )
    return checkpoint_callback
def get_early_stopping_callback(metric, patience):
    return EarlyStopping(
        monitor=f"val_{metric}", mode="min" if "loss" in metric else "max", patience=patience, verbose=True, )
class Seq2SeqLoggingCallback(pl.Callback):
    """Logs learning rates and writes seq2seq metrics/generations to disk."""
    def on_batch_end(self, trainer, pl_module):
        lrs = {f"lr_group_{i}": param["lr"] for i, param in enumerate(pl_module.trainer.optimizers[0].param_groups)}
        pl_module.logger.log_metrics(lrs)
    @rank_zero_only
    def _write_logs(self, trainer: pl.Trainer, pl_module: pl.LightningModule, type_path: str, save_generations=True):
        logger.info(f"***** {type_path} results at step {trainer.global_step:05d} *****")
        metrics = trainer.callback_metrics
        trainer.logger.log_metrics({k: v for k, v in metrics.items() if k not in ["log", "progress_bar", "preds"]})
        # Log results
        od = Path(pl_module.hparams.output_dir)
        if type_path == "test":
            results_file = od / "test_results.txt"
            generations_file = od / "test_generations.txt"
        else:
            # this never gets hit. I prefer not to save intermediate generations, and results are in metrics.json
            # If people want this it will be easy enough to add back.
            results_file = od / f"{type_path}_results/{trainer.global_step:05d}.txt"
            generations_file = od / f"{type_path}_generations/{trainer.global_step:05d}.txt"
        results_file.parent.mkdir(exist_ok=True)
        generations_file.parent.mkdir(exist_ok=True)
        with open(results_file, "a+") as writer:
            for key in sorted(metrics):
                if key in ["log", "progress_bar", "preds"]:
                    continue
                val = metrics[key]
                if isinstance(val, torch.Tensor):
                    val = val.item()
                msg = f"{key}: {val:.6f}\n"
                writer.write(msg)
        if not save_generations:
            return
        if "preds" in metrics:
            content = "\n".join(metrics["preds"])
            generations_file.open("w+").write(content)
    @rank_zero_only
    def on_train_start(self, trainer, pl_module):
        try:
            npars = pl_module.model.model.num_parameters()
        except AttributeError:
            npars = pl_module.model.num_parameters()
        n_trainable_pars = count_trainable_parameters(pl_module)
        # mp stands for million parameters
        trainer.logger.log_metrics({"n_params": npars, "mp": npars / 1e6, "grad_mp": n_trainable_pars / 1e6})
    @rank_zero_only
    def on_test_end(self, trainer: pl.Trainer, pl_module: pl.LightningModule):
        save_json(pl_module.metrics, pl_module.metrics_save_path)
        return self._write_logs(trainer, pl_module, "test")
    @rank_zero_only
    def on_validation_end(self, trainer: pl.Trainer, pl_module: pl.LightningModule):
        save_json(pl_module.metrics, pl_module.metrics_save_path)
        # Uncommenting this will save val generations
        # return self._write_logs(trainer, pl_module, "valid")
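# Hedged sketch of wiring these callbacks into a Trainer; `output_dir` and
# the LightningModule to fit are placeholders, not part of this file.
def build_trainer(output_dir, patience=3):
    callbacks = [
        Seq2SeqLoggingCallback(),
        get_checkpoint_callback(output_dir, metric="rouge2"),
        get_early_stopping_callback(metric="rouge2", patience=patience),
    ]
    return pl.Trainer(max_epochs=3, callbacks=callbacks)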
| 346 | 0 |
from math import pi, sqrt
def gamma(num: float) -> float:
    if num <= 0:
        raise ValueError('''math domain error''')
    if num > 171.5:
        raise OverflowError('''math range error''')
    elif num - int(num) not in (0, 0.5):
        raise NotImplementedError('''num must be an integer or a half-integer''')
    elif num == 0.5:
        return sqrt(pi)
    else:
        return 1.0 if num == 1 else (num - 1) * gamma(num - 1)
def test_gamma() -> None:
    assert gamma(0.5) == sqrt(pi)
    assert gamma(1) == 1.0
    assert gamma(2) == 1.0
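def test_gamma_against_math():
    # Hedged sanity check against the standard library's implementation
    # (math.gamma is not part of this file):
    import math
    for x in (0.5, 1, 2, 3.5, 10):
        assert math.isclose(gamma(x), math.gamma(x), rel_tol=1e-9)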
if __name__ == "__main__":
from doctest import testmod
testmod()
    num = 1.0
while num:
        num = float(input('''Gamma of: '''))
print(f'gamma({num}) = {gamma(num)}')
print('''\nEnter 0 to exit...''')
| 49 |
deps = {
"""Pillow""": """Pillow<10.0.0""",
"""accelerate""": """accelerate>=0.20.3""",
"""av""": """av==9.2.0""",
"""beautifulsoup4""": """beautifulsoup4""",
"""black""": """black~=23.1""",
"""codecarbon""": """codecarbon==1.2.0""",
"""cookiecutter""": """cookiecutter==1.7.3""",
"""dataclasses""": """dataclasses""",
"""datasets""": """datasets!=2.5.0""",
"""decord""": """decord==0.6.0""",
"""deepspeed""": """deepspeed>=0.9.3""",
"""diffusers""": """diffusers""",
"""dill""": """dill<0.3.5""",
"""evaluate""": """evaluate>=0.2.0""",
"""fairscale""": """fairscale>0.3""",
"""faiss-cpu""": """faiss-cpu""",
"""fastapi""": """fastapi""",
"""filelock""": """filelock""",
"""flax""": """flax>=0.4.1,<=0.7.0""",
"""ftfy""": """ftfy""",
"""fugashi""": """fugashi>=1.0""",
"""GitPython""": """GitPython<3.1.19""",
"""hf-doc-builder""": """hf-doc-builder>=0.3.0""",
"""huggingface-hub""": """huggingface-hub>=0.14.1,<1.0""",
"""importlib_metadata""": """importlib_metadata""",
"""ipadic""": """ipadic>=1.0.0,<2.0""",
"""isort""": """isort>=5.5.4""",
"""jax""": """jax>=0.2.8,!=0.3.2,<=0.4.13""",
"""jaxlib""": """jaxlib>=0.1.65,<=0.4.13""",
"""jieba""": """jieba""",
"""kenlm""": """kenlm""",
"""keras-nlp""": """keras-nlp>=0.3.1""",
"""librosa""": """librosa""",
"""nltk""": """nltk""",
"""natten""": """natten>=0.14.6""",
"""numpy""": """numpy>=1.17""",
"""onnxconverter-common""": """onnxconverter-common""",
"""onnxruntime-tools""": """onnxruntime-tools>=1.4.2""",
"""onnxruntime""": """onnxruntime>=1.4.0""",
"""opencv-python""": """opencv-python""",
"""optuna""": """optuna""",
"""optax""": """optax>=0.0.8,<=0.1.4""",
"""packaging""": """packaging>=20.0""",
"""parameterized""": """parameterized""",
"""phonemizer""": """phonemizer""",
"""protobuf""": """protobuf""",
"""psutil""": """psutil""",
"""pyyaml""": """pyyaml>=5.1""",
"""pydantic""": """pydantic<2""",
"""pytest""": """pytest>=7.2.0""",
"""pytest-timeout""": """pytest-timeout""",
"""pytest-xdist""": """pytest-xdist""",
"""python""": """python>=3.8.0""",
"""ray[tune]""": """ray[tune]""",
"""regex""": """regex!=2019.12.17""",
"""requests""": """requests""",
"""rhoknp""": """rhoknp>=1.1.0,<1.3.1""",
"""rjieba""": """rjieba""",
"""rouge-score""": """rouge-score!=0.0.7,!=0.0.8,!=0.1,!=0.1.1""",
"""ruff""": """ruff>=0.0.241,<=0.0.259""",
"""sacrebleu""": """sacrebleu>=1.4.12,<2.0.0""",
"""sacremoses""": """sacremoses""",
"""safetensors""": """safetensors>=0.3.1""",
"""sagemaker""": """sagemaker>=2.31.0""",
"""scikit-learn""": """scikit-learn""",
"""sentencepiece""": """sentencepiece>=0.1.91,!=0.1.92""",
"""sigopt""": """sigopt""",
"""starlette""": """starlette""",
"""sudachipy""": """sudachipy>=0.6.6""",
"""sudachidict_core""": """sudachidict_core>=20220729""",
"""tensorflow-cpu""": """tensorflow-cpu>=2.6,<2.14""",
"""tensorflow""": """tensorflow>=2.6,<2.14""",
"""tensorflow-text""": """tensorflow-text<2.14""",
"""tf2onnx""": """tf2onnx""",
"""timeout-decorator""": """timeout-decorator""",
"""timm""": """timm""",
"""tokenizers""": """tokenizers>=0.11.1,!=0.11.3,<0.14""",
"""torch""": """torch>=1.9,!=1.12.0""",
"""torchaudio""": """torchaudio""",
"""torchvision""": """torchvision""",
"""pyctcdecode""": """pyctcdecode>=0.4.0""",
"""tqdm""": """tqdm>=4.27""",
"""unidic""": """unidic>=1.0.2""",
"""unidic_lite""": """unidic_lite>=1.0.7""",
"""urllib3""": """urllib3<2.0.0""",
"""uvicorn""": """uvicorn""",
}
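# Sketch of how such a pinned-requirements table is typically consumed;
# a helper in this style usually lives next to the table (name assumed here):
def deps_list(*pkgs):
    # look up the pinned requirement string for each bare package name
    return [deps[pkg] for pkg in pkgs]
# deps_list("torch", "numpy") -> ['torch>=1.9,!=1.12.0', 'numpy>=1.17']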
| 128 | 0 |
'''simple docstring'''
from decimal import Decimal, getcontext
from math import ceil, factorial
def pi(precision: int) -> str:
    if not isinstance(precision, int):
        raise TypeError("""Undefined for non-integers""")
    elif precision < 1:
        raise ValueError("""Undefined for non-natural numbers""")
    getcontext().prec = precision
    num_iterations = ceil(precision / 14)
    constant_term = 426880 * Decimal(10005).sqrt()
    exponential_term = 1
    linear_term = 13591409
    partial_sum = Decimal(linear_term)
    for k in range(1, num_iterations):
        multinomial_term = factorial(6 * k) // (factorial(3 * k) * factorial(k) ** 3)
        linear_term += 545140134
        exponential_term *= -262537412640768000
        partial_sum += Decimal(multinomial_term * linear_term) / exponential_term
    return str(constant_term / partial_sum)[:-1]
if __name__ == "__main__":
    n = 50
    print(F"""The first {n} digits of pi is: {pi(n)}""")
| 3 |
'''simple docstring'''
import sys
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from huggingface_hub import HfFolder, delete_repo
from requests.exceptions import HTTPError
from transformers import AutoImageProcessor, ViTImageProcessor
from transformers.testing_utils import TOKEN, USER, get_tests_dir, is_staging_test
sys.path.append(str(Path(__file__).parent.parent / "utils"))
from test_module.custom_image_processing import CustomImageProcessor # noqa E402
SAMPLE_IMAGE_PROCESSING_CONFIG_DIR = get_tests_dir("fixtures")
class ImageProcessorUtilTester(unittest.TestCase):
    def test_cached_files_are_used_when_internet_is_down(self):
        # A mock response for an HTTP head request to emulate server down
        response_mock = mock.Mock()
        response_mock.status_code = 500
        response_mock.headers = {}
        response_mock.raise_for_status.side_effect = HTTPError
        response_mock.json.return_value = {}
        # Download this model to make sure it's in the cache.
        _ = ViTImageProcessor.from_pretrained("hf-internal-testing/tiny-random-vit")
        # Under the mock environment we get a 500 error when trying to reach the model.
        with mock.patch("requests.Session.request", return_value=response_mock) as mock_head:
            _ = ViTImageProcessor.from_pretrained("hf-internal-testing/tiny-random-vit")
            # This check we did call the fake head request
            mock_head.assert_called()
    def test_legacy_load_from_url(self):
        # This test is for deprecated behavior and can be removed in v5
        _ = ViTImageProcessor.from_pretrained(
            "https://huggingface.co/hf-internal-testing/tiny-random-vit/resolve/main/preprocessor_config.json")
    def test_image_processor_from_pretrained_subfolder(self):
        with self.assertRaises(OSError):
            # config is in subfolder, the following should not work without specifying the subfolder
            _ = AutoImageProcessor.from_pretrained("hf-internal-testing/stable-diffusion-all-variants")
        config = AutoImageProcessor.from_pretrained(
            "hf-internal-testing/stable-diffusion-all-variants", subfolder="feature_extractor")
        self.assertIsNotNone(config)
@is_staging_test
class ImageProcessorPushToHubTester(unittest.TestCase):
    @classmethod
    def setUpClass(cls):
        cls._token = TOKEN
        HfFolder.save_token(TOKEN)
    @classmethod
    def tearDownClass(cls):
        try:
            delete_repo(token=cls._token, repo_id="test-image-processor")
        except HTTPError:
            pass
        try:
            delete_repo(token=cls._token, repo_id="valid_org/test-image-processor-org")
        except HTTPError:
            pass
        try:
            delete_repo(token=cls._token, repo_id="test-dynamic-image-processor")
        except HTTPError:
            pass
    def test_push_to_hub(self):
        image_processor = ViTImageProcessor.from_pretrained(SAMPLE_IMAGE_PROCESSING_CONFIG_DIR)
        image_processor.push_to_hub("test-image-processor", use_auth_token=self._token)
        new_image_processor = ViTImageProcessor.from_pretrained(f"{USER}/test-image-processor")
        for k, v in image_processor.__dict__.items():
            self.assertEqual(v, getattr(new_image_processor, k))
        # Reset repo
        delete_repo(token=self._token, repo_id="test-image-processor")
        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            image_processor.save_pretrained(
                tmp_dir, repo_id="test-image-processor", push_to_hub=True, use_auth_token=self._token)
            new_image_processor = ViTImageProcessor.from_pretrained(f"{USER}/test-image-processor")
            for k, v in image_processor.__dict__.items():
                self.assertEqual(v, getattr(new_image_processor, k))
    def test_push_to_hub_in_organization(self):
        image_processor = ViTImageProcessor.from_pretrained(SAMPLE_IMAGE_PROCESSING_CONFIG_DIR)
        image_processor.push_to_hub("valid_org/test-image-processor", use_auth_token=self._token)
        new_image_processor = ViTImageProcessor.from_pretrained("valid_org/test-image-processor")
        for k, v in image_processor.__dict__.items():
            self.assertEqual(v, getattr(new_image_processor, k))
        # Reset repo
        delete_repo(token=self._token, repo_id="valid_org/test-image-processor")
        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            image_processor.save_pretrained(
                tmp_dir, repo_id="valid_org/test-image-processor-org", push_to_hub=True, use_auth_token=self._token)
            new_image_processor = ViTImageProcessor.from_pretrained("valid_org/test-image-processor-org")
            for k, v in image_processor.__dict__.items():
                self.assertEqual(v, getattr(new_image_processor, k))
    def test_push_to_hub_dynamic_image_processor(self):
        CustomImageProcessor.register_for_auto_class()
        image_processor = CustomImageProcessor.from_pretrained(SAMPLE_IMAGE_PROCESSING_CONFIG_DIR)
        image_processor.push_to_hub("test-dynamic-image-processor", use_auth_token=self._token)
        # This has added the proper auto_map field to the config
        self.assertDictEqual(
            image_processor.auto_map, {"AutoImageProcessor": "custom_image_processing.CustomImageProcessor"}, )
        new_image_processor = AutoImageProcessor.from_pretrained(
            f"{USER}/test-dynamic-image-processor", trust_remote_code=True)
        # Can't make an isinstance check because the new_image_processor is from the CustomImageProcessor class of a dynamic module
        self.assertEqual(new_image_processor.__class__.__name__, "CustomImageProcessor")
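    # Hedged sketch of the round-trip these tests exercise, outside the
    # assertion scaffolding (repo name below is a placeholder):
    def _demo_push_and_reload(self):
        image_processor = ViTImageProcessor.from_pretrained(SAMPLE_IMAGE_PROCESSING_CONFIG_DIR)
        image_processor.push_to_hub("my-image-processor", use_auth_token=self._token)
        return ViTImageProcessor.from_pretrained(f"{USER}/my-image-processor")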
| 3 | 1 |
"""simple docstring"""
from ..utils import (
OptionalDependencyNotAvailable,
is_flax_available,
is_scipy_available,
is_torch_available,
is_torchsde_available,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ..utils.dummy_pt_objects import * # noqa F403
else:
from .scheduling_consistency_models import CMStochasticIterativeScheduler
from .scheduling_ddim import DDIMScheduler
from .scheduling_ddim_inverse import DDIMInverseScheduler
from .scheduling_ddim_parallel import DDIMParallelScheduler
from .scheduling_ddpm import DDPMScheduler
from .scheduling_ddpm_parallel import DDPMParallelScheduler
from .scheduling_deis_multistep import DEISMultistepScheduler
from .scheduling_dpmsolver_multistep import DPMSolverMultistepScheduler
from .scheduling_dpmsolver_multistep_inverse import DPMSolverMultistepInverseScheduler
from .scheduling_dpmsolver_singlestep import DPMSolverSinglestepScheduler
from .scheduling_euler_ancestral_discrete import EulerAncestralDiscreteScheduler
from .scheduling_euler_discrete import EulerDiscreteScheduler
from .scheduling_heun_discrete import HeunDiscreteScheduler
from .scheduling_ipndm import IPNDMScheduler
    from .scheduling_k_dpm_2_ancestral_discrete import KDPM2AncestralDiscreteScheduler
    from .scheduling_k_dpm_2_discrete import KDPM2DiscreteScheduler
from .scheduling_karras_ve import KarrasVeScheduler
from .scheduling_pndm import PNDMScheduler
from .scheduling_repaint import RePaintScheduler
from .scheduling_sde_ve import ScoreSdeVeScheduler
from .scheduling_sde_vp import ScoreSdeVpScheduler
from .scheduling_unclip import UnCLIPScheduler
from .scheduling_unipc_multistep import UniPCMultistepScheduler
from .scheduling_utils import KarrasDiffusionSchedulers, SchedulerMixin
from .scheduling_vq_diffusion import VQDiffusionScheduler
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ..utils.dummy_flax_objects import * # noqa F403
else:
from .scheduling_ddim_flax import FlaxDDIMScheduler
from .scheduling_ddpm_flax import FlaxDDPMScheduler
from .scheduling_dpmsolver_multistep_flax import FlaxDPMSolverMultistepScheduler
from .scheduling_karras_ve_flax import FlaxKarrasVeScheduler
from .scheduling_lms_discrete_flax import FlaxLMSDiscreteScheduler
from .scheduling_pndm_flax import FlaxPNDMScheduler
from .scheduling_sde_ve_flax import FlaxScoreSdeVeScheduler
from .scheduling_utils_flax import (
FlaxKarrasDiffusionSchedulers,
FlaxSchedulerMixin,
FlaxSchedulerOutput,
broadcast_to_shape_from_left,
)
try:
if not (is_torch_available() and is_scipy_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ..utils.dummy_torch_and_scipy_objects import * # noqa F403
else:
from .scheduling_lms_discrete import LMSDiscreteScheduler
try:
if not (is_torch_available() and is_torchsde_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ..utils.dummy_torch_and_torchsde_objects import * # noqa F403
else:
from .scheduling_dpmsolver_sde import DPMSolverSDEScheduler
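# Hedged usage sketch for any scheduler exported above (DDIM shown;
# the config values are illustrative):
def _demo_scheduler():
    scheduler = DDIMScheduler(num_train_timesteps=1000)
    scheduler.set_timesteps(50)  # choose 50 inference steps
    return scheduler.timesteps   # descending tensor of 50 timesteps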
| 64 |
"""simple docstring"""
from ..utils import DummyObject, requires_backends
class MidiProcessor(metaclass=DummyObject):
    # Placeholder that raises a clear error when `note_seq` is not installed.
    # (The class name follows the diffusers dummy-object convention.)
    _backends = ["note_seq"]
    def __init__(self, *args, **kwargs):
        requires_backends(self, ["note_seq"])
    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["note_seq"])
    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["note_seq"])
| 64 | 1 |
from typing import List, Optional, Union
import numpy as np
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import PaddingStrategy, TensorType, logging
logger = logging.get_logger(__name__)
class EncodecFeatureExtractor(SequenceFeatureExtractor):
    model_input_names = ["input_values", "padding_mask"]
    def __init__(self, feature_size: int = 1, sampling_rate: int = 24000, padding_value: float = 0.0, chunk_length_s: float = None, overlap: float = None, **kwargs, ):
        super().__init__(feature_size=feature_size, sampling_rate=sampling_rate, padding_value=padding_value, **kwargs)
        self.chunk_length_s = chunk_length_s
        self.overlap = overlap
    @property
    def chunk_length(self):
        if self.chunk_length_s is None:
            return None
        else:
            return int(self.chunk_length_s * self.sampling_rate)
    @property
    def chunk_stride(self):
        if self.chunk_length_s is None or self.overlap is None:
            return None
        else:
            return max(1, int((1.0 - self.overlap) * self.chunk_length))
    def __call__(self, raw_audio: Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]], padding: Optional[Union[bool, str, PaddingStrategy]] = None, truncation: Optional[bool] = False, max_length: Optional[int] = None, return_tensors: Optional[Union[str, TensorType]] = None, sampling_rate: Optional[int] = None, ) -> BatchFeature:
if sampling_rate is not None:
if sampling_rate != self.sampling_rate:
raise ValueError(
F'The model corresponding to this feature extractor: {self} was trained using a sampling rate of'
F' {self.sampling_rate}. Please make sure that the provided audio input was sampled with'
F' {self.sampling_rate} and not {sampling_rate}.' )
else:
logger.warning(
'It is strongly recommended to pass the `sampling_rate` argument to this function. '
'Failing to do so can result in silent errors that might be hard to debug.' )
if padding and truncation:
raise ValueError('Both padding and truncation were set. Make sure you only set one.' )
elif padding is None:
# by default let's pad the inputs
            padding = True
        is_batched = bool(
            isinstance(raw_audio, (list, tuple)) and (isinstance(raw_audio[0], (np.ndarray, tuple, list))))
        if is_batched:
            raw_audio = [np.asarray(audio, dtype=np.float32).T for audio in raw_audio]
        elif not is_batched and not isinstance(raw_audio, np.ndarray):
            raw_audio = np.asarray(raw_audio, dtype=np.float32)
        elif isinstance(raw_audio, np.ndarray) and raw_audio.dtype is np.dtype(np.float64):
            raw_audio = raw_audio.astype(np.float32)
        # always return batch
        if not is_batched:
            raw_audio = [np.asarray(raw_audio).T]
        # verify inputs are valid
        for idx, example in enumerate(raw_audio):
            if example.ndim > 2:
                raise ValueError(f"Expected input shape (channels, length) but got shape {example.shape}")
            if self.feature_size == 1 and example.ndim != 1:
                raise ValueError(f"Expected mono audio but example has {example.shape[-1]} channels")
            if self.feature_size == 2 and example.shape[-1] != 2:
                raise ValueError(f"Expected stereo audio but example has {example.shape[-1]} channels")
        padded_inputs = None
        input_values = BatchFeature({"input_values": raw_audio})
if self.chunk_stride is not None and self.chunk_length is not None and max_length is None:
            if truncation:
                max_length = min(array.shape[0] for array in raw_audio)
                nb_step = int(np.floor(max_length / self.chunk_stride))
                max_length = (nb_step - 1) * self.chunk_stride + self.chunk_length
            elif padding:
                max_length = max(array.shape[0] for array in raw_audio)
                nb_step = int(np.ceil(max_length / self.chunk_stride))
                max_length = (nb_step - 1) * self.chunk_stride + self.chunk_length
                padding = "max_length"
        else:
            padded_inputs = input_values
# normal padding on batch
        if padded_inputs is None:
            padded_inputs = self.pad(
                input_values, max_length=max_length, truncation=truncation, padding=padding, return_attention_mask=padding, )
            if padding:
                padded_inputs["padding_mask"] = padded_inputs.pop("attention_mask")
        input_values = []
        for example in padded_inputs.pop("input_values"):
            if self.feature_size == 1:
                example = example[..., None]
            input_values.append(example.T)
        padded_inputs["input_values"] = input_values
        if return_tensors is not None:
            padded_inputs = padded_inputs.convert_to_tensors(return_tensors)
return padded_inputs
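# Hedged usage sketch with synthetic mono audio (all values constructed
# locally; no checkpoint required):
def _demo_extractor():
    extractor = EncodecFeatureExtractor(feature_size=1, sampling_rate=24000, chunk_length_s=1.0, overlap=0.5)
    audio = np.random.randn(48000).astype(np.float32)  # 2 seconds of mono noise
    features = extractor(audio, sampling_rate=24000, return_tensors="np")
    return features["input_values"].shape  # (batch, channels, padded_length)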
| 368 |
from __future__ import annotations
class BoyerMooreSearch:
    def __init__(self, text: str, pattern: str):
        self.text, self.pattern = text, pattern
        self.textLen, self.patLen = len(text), len(pattern)
    def match_in_pattern(self, char: str) -> int:
        # rightmost occurrence of `char` in the pattern, or -1
        for i in range(self.patLen - 1, -1, -1):
            if char == self.pattern[i]:
                return i
        return -1
    def mismatch_in_text(self, current_pos: int) -> int:
        # rightmost mismatching position in the current text window, or -1 on a full match
        for i in range(self.patLen - 1, -1, -1):
            if self.pattern[i] != self.text[current_pos + i]:
                return current_pos + i
        return -1
    def bad_character_heuristic(self):
        # searches pattern in text and returns index positions
        positions = []
        for i in range(self.textLen - self.patLen + 1):
            mismatch_index = self.mismatch_in_text(i)
            if mismatch_index == -1:
                positions.append(i)
            else:
                match_index = self.match_in_pattern(self.text[mismatch_index])
                i = (
                    mismatch_index - match_index
                )  # shifting index lgtm [py/multiple-definition]
        return positions
text = "ABAABA"
pattern = "AB"
bms = BoyerMooreSearch(text, pattern)
positions = bms.bad_character_heuristic()
if len(positions) == 0:
print("""No match found""")
else:
print("""Pattern found in following positions: """)
print(positions)
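# A second illustrative run (values chosen here, not from the original file):
bms2 = BoyerMooreSearch("THIS IS A TEST TEXT", "TEST")
print(bms2.bad_character_heuristic())  # [10]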
| 169 | 0 |
'''simple docstring'''
import os
from collections import deque
import torch
from torch.utils.data import Dataset
class CNNDMDataset(Dataset):
    def __init__(self, path="", prefix="train"):
        assert os.path.isdir(path)
        self.documents = []
        story_filenames_list = os.listdir(path)
        for story_filename in story_filenames_list:
            if "summary" in story_filename:
                continue
            path_to_story = os.path.join(path, story_filename)
            if not os.path.isfile(path_to_story):
                continue
            self.documents.append(path_to_story)
    def __len__(self):
        return len(self.documents)
    def __getitem__(self, idx):
        document_path = self.documents[idx]
        document_name = document_path.split("/")[-1]
        with open(document_path, encoding="utf-8") as source:
            raw_story = source.read()
        story_lines, summary_lines = process_story(raw_story)
        return document_name, story_lines, summary_lines
def process_story(raw_story):
    """Split a raw CNN/DailyMail story file into story lines and summary lines."""
    nonempty_lines = list(filter(lambda x: len(x) != 0, [line.strip() for line in raw_story.split("\n")]))
    # for some unknown reason some lines miss a period, add it
    nonempty_lines = [_add_missing_period(line) for line in nonempty_lines]
    # gather article lines
    story_lines = []
    lines = deque(nonempty_lines)
    while True:
        try:
            element = lines.popleft()
            if element.startswith("@highlight"):
                break
            story_lines.append(element)
        except IndexError:
            # if "@highlight" is absent from the file we pop
            # all elements until there is None, raising an exception.
            return story_lines, []
    # gather summary lines
    summary_lines = list(filter(lambda t: not t.startswith("@highlight"), lines))
    return story_lines, summary_lines
def _add_missing_period(line):
    END_TOKENS = [".", "!", "?", "...", "'", "`", '"', "\u2019", "\u2019", ")"]
    if line.startswith("@highlight"):
        return line
    if line[-1] in END_TOKENS:
        return line
    return line + "."
def fit_to_block_size(sequence, block_size, pad_token_id):
    """Truncate or pad `sequence` to exactly `block_size` tokens."""
    if len(sequence) > block_size:
        return sequence[:block_size]
    else:
        sequence.extend([pad_token_id] * (block_size - len(sequence)))
        return sequence
def build_mask(sequence, pad_token_id):
    """Build an attention mask that zeroes out padding positions."""
    mask = torch.ones_like(sequence)
    idx_pad_tokens = sequence == pad_token_id
    mask[idx_pad_tokens] = 0
    return mask
def _lowerCAmelCase ( _UpperCamelCase : Union[str, Any] , _UpperCamelCase : Dict , _UpperCamelCase : List[Any] ) -> Union[str, Any]:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =[tokenizer.encode(_UpperCamelCase ) for line in story_lines]
_SCREAMING_SNAKE_CASE =[token for sentence in story_lines_token_ids for token in sentence]
_SCREAMING_SNAKE_CASE =[tokenizer.encode(_UpperCamelCase ) for line in summary_lines]
_SCREAMING_SNAKE_CASE =[token for sentence in summary_lines_token_ids for token in sentence]
return story_token_ids, summary_token_ids
def _lowerCAmelCase ( _UpperCamelCase : Optional[int] , _UpperCamelCase : Tuple ) -> str:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =[]
for sequence in batch:
_SCREAMING_SNAKE_CASE =-1
_SCREAMING_SNAKE_CASE =[]
for s in sequence:
if s == separator_token_id:
sentence_num += 1
embeddings.append(sentence_num % 2 )
batch_embeddings.append(_UpperCamelCase )
return torch.tensor(_UpperCamelCase )
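# Hedged sketch exercising `process_story` on a toy story string; the
# @highlight markers follow the raw CNN/DailyMail format:
def _demo_process_story():
    raw = "First sentence.\nSecond sentence\n@highlight\nthe summary line"
    story, summary = process_story(raw)
    return story, summary  # (['First sentence.', 'Second sentence.'], ['the summary line.'])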
| 47 |
import inspect
import logging
import os
import random
import shutil
import tempfile
import unittest
import pytest
import torch
from torch import nn
from torch.utils.data import DataLoader, TensorDataset
from accelerate import Accelerator
from accelerate.test_utils import execute_subprocess_async, require_cuda
from accelerate.utils import ProjectConfiguration, set_seed
logger = logging.getLogger(__name__)
def dummy_dataloaders(a=2, b=3, batch_size=16, n_train_batches: int = 10, n_valid_batches: int = 2):
    """Generate a pair of dummy dataloaders for y = a*x + b with noise."""
    def get_dataset(n_batches):
        x = torch.randn(batch_size * n_batches, 1)
        return TensorDataset(x, a * x + b + 0.1 * torch.randn(batch_size * n_batches, 1))
    train_dataset = get_dataset(n_train_batches)
    valid_dataset = get_dataset(n_valid_batches)
    train_dataloader = DataLoader(train_dataset, shuffle=True, batch_size=batch_size, num_workers=4)
    valid_dataloader = DataLoader(valid_dataset, shuffle=False, batch_size=batch_size, num_workers=4)
    return (train_dataloader, valid_dataloader)
def train(num_epochs, model, dataloader, optimizer, accelerator, scheduler=None):
    """Train `model` for `num_epochs`, returning the random draws for reproducibility checks."""
    rands = []
    for epoch in range(num_epochs):
        # Train quickly
        model.train()
        for batch in dataloader:
            x, y = batch
            outputs = model(x)
            loss = torch.nn.functional.mse_loss(outputs, y)
            accelerator.backward(loss)
            optimizer.step()
            optimizer.zero_grad()
            rands.append(random.random())  # Introduce some randomness
            if scheduler is not None:
                scheduler.step()
    return rands
class DummyModel(nn.Module):
    """Simple y = a*x + b model."""
    def __init__(self):
        super().__init__()
        self.a = nn.Parameter(torch.randn(1))
        self.b = nn.Parameter(torch.randn(1))
    def forward(self, x):
        return x * self.a + self.b
class CheckpointTest(unittest.TestCase):
    def test_with_save_limit(self):
        with tempfile.TemporaryDirectory() as tmpdir:
            set_seed(42)
            model = DummyModel()
            optimizer = torch.optim.Adam(params=model.parameters(), lr=1e-3)
            train_dataloader, valid_dataloader = dummy_dataloaders()
            project_config = ProjectConfiguration(total_limit=1, project_dir=tmpdir, automatic_checkpoint_naming=True)
            # Train baseline
            accelerator = Accelerator(project_config=project_config)
            model, optimizer, train_dataloader, valid_dataloader = accelerator.prepare(
                model, optimizer, train_dataloader, valid_dataloader)
            # Save initial
            accelerator.save_state()
            # Save second state
            accelerator.save_state()
            self.assertEqual(len(os.listdir(accelerator.project_dir)), 1)
    def test_can_resume_training_with_folder(self):
        with tempfile.TemporaryDirectory() as tmpdir:
            set_seed(42)
            model = DummyModel()
            optimizer = torch.optim.Adam(params=model.parameters(), lr=1e-3)
            train_dataloader, valid_dataloader = dummy_dataloaders()
            # Train baseline
            accelerator = Accelerator()
            model, optimizer, train_dataloader, valid_dataloader = accelerator.prepare(
                model, optimizer, train_dataloader, valid_dataloader)
            # Save initial
            initial = os.path.join(tmpdir, 'initial')
            accelerator.save_state(initial)
            a, b = model.a.item(), model.b.item()
            opt_state = optimizer.state_dict()
            ground_truth_rands = train(3, model, train_dataloader, optimizer, accelerator)
            a1, b1 = model.a.item(), model.b.item()
            opt_state1 = optimizer.state_dict()
            # Train partially
            set_seed(42)
            model = DummyModel()
            optimizer = torch.optim.Adam(params=model.parameters(), lr=1e-3)
            train_dataloader, valid_dataloader = dummy_dataloaders()
            accelerator = Accelerator()
            model, optimizer, train_dataloader, valid_dataloader = accelerator.prepare(
                model, optimizer, train_dataloader, valid_dataloader)
            accelerator.load_state(initial)
            a2, b2 = model.a.item(), model.b.item()
            opt_state2 = optimizer.state_dict()
            self.assertEqual(a, a2)
            self.assertEqual(b, b2)
            self.assertEqual(opt_state, opt_state2)
            test_rands = train(2, model, train_dataloader, optimizer, accelerator)
            # Save everything
            checkpoint = os.path.join(tmpdir, 'checkpoint')
            accelerator.save_state(checkpoint)
            # Load everything back in and make sure all states work
            accelerator.load_state(checkpoint)
            test_rands += train(1, model, train_dataloader, optimizer, accelerator)
            a3, b3 = model.a.item(), model.b.item()
            opt_state3 = optimizer.state_dict()
            self.assertEqual(a1, a3)
            self.assertEqual(b1, b3)
            self.assertEqual(opt_state1, opt_state3)
            self.assertEqual(ground_truth_rands, test_rands)
    def test_can_resume_training(self):
        with tempfile.TemporaryDirectory() as tmpdir:
            set_seed(42)
            model = DummyModel()
            optimizer = torch.optim.Adam(params=model.parameters(), lr=1e-3)
            train_dataloader, valid_dataloader = dummy_dataloaders()
            project_config = ProjectConfiguration(automatic_checkpoint_naming=True)
            # Train baseline
            accelerator = Accelerator(project_dir=tmpdir, project_config=project_config)
            model, optimizer, train_dataloader, valid_dataloader = accelerator.prepare(
                model, optimizer, train_dataloader, valid_dataloader)
            # Save initial
            accelerator.save_state()
            a, b = model.a.item(), model.b.item()
            opt_state = optimizer.state_dict()
            ground_truth_rands = train(3, model, train_dataloader, optimizer, accelerator)
            a1, b1 = model.a.item(), model.b.item()
            opt_state1 = optimizer.state_dict()
            # Train partially
            set_seed(42)
            model = DummyModel()
            optimizer = torch.optim.Adam(params=model.parameters(), lr=1e-3)
            train_dataloader, valid_dataloader = dummy_dataloaders()
            project_config = ProjectConfiguration(iteration=1, automatic_checkpoint_naming=True)
            accelerator = Accelerator(project_dir=tmpdir, project_config=project_config)
            model, optimizer, train_dataloader, valid_dataloader = accelerator.prepare(
                model, optimizer, train_dataloader, valid_dataloader)
            accelerator.load_state(os.path.join(tmpdir, 'checkpoints', 'checkpoint_0'))
            a2, b2 = model.a.item(), model.b.item()
            opt_state2 = optimizer.state_dict()
            self.assertEqual(a, a2)
            self.assertEqual(b, b2)
            self.assertEqual(opt_state, opt_state2)
            test_rands = train(2, model, train_dataloader, optimizer, accelerator)
            # Save everything
            accelerator.save_state()
            # Load everything back in and make sure all states work
            accelerator.load_state(os.path.join(tmpdir, 'checkpoints', 'checkpoint_1'))
            test_rands += train(1, model, train_dataloader, optimizer, accelerator)
            a3, b3 = model.a.item(), model.b.item()
            opt_state3 = optimizer.state_dict()
            self.assertEqual(a1, a3)
            self.assertEqual(b1, b3)
            self.assertEqual(opt_state1, opt_state3)
            self.assertEqual(ground_truth_rands, test_rands)
    def test_invalid_registration(self):
        t = torch.tensor([1, 2, 3])
        t1 = torch.tensor([2, 3, 4])
        net = DummyModel()
        opt = torch.optim.Adam(net.parameters())
        accelerator = Accelerator()
        with self.assertRaises(ValueError) as ve:
            accelerator.register_for_checkpointing(t, t1, net, opt)
        message = str(ve.exception)
        self.assertTrue('Item at index 0' in message)
        self.assertTrue('Item at index 1' in message)
        self.assertFalse('Item at index 2' in message)
        self.assertFalse('Item at index 3' in message)
    def test_with_scheduler(self):
        with tempfile.TemporaryDirectory() as tmpdir:
            set_seed(42)
            model = DummyModel()
            optimizer = torch.optim.Adam(params=model.parameters(), lr=1e-3)
            scheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=1, gamma=0.99)
            train_dataloader, valid_dataloader = dummy_dataloaders()
            project_config = ProjectConfiguration(automatic_checkpoint_naming=True)
            # Train baseline
            accelerator = Accelerator(project_dir=tmpdir, project_config=project_config)
            model, optimizer, train_dataloader, valid_dataloader, scheduler = accelerator.prepare(
                model, optimizer, train_dataloader, valid_dataloader, scheduler)
            # Save initial
            accelerator.save_state()
            scheduler_state = scheduler.state_dict()
            train(3, model, train_dataloader, optimizer, accelerator, scheduler)
            self.assertNotEqual(scheduler_state, scheduler.state_dict())
            # Load everything back in and make sure all states work
            accelerator.load_state(os.path.join(tmpdir, 'checkpoints', 'checkpoint_0'))
            self.assertEqual(scheduler_state, scheduler.state_dict())
    def test_checkpoint_deletion(self):
        with tempfile.TemporaryDirectory() as tmpdir:
            set_seed(42)
            model = DummyModel()
            project_config = ProjectConfiguration(automatic_checkpoint_naming=True, total_limit=2)
            # Train baseline
            accelerator = Accelerator(project_dir=tmpdir, project_config=project_config)
            model = accelerator.prepare(model)
            # Save 3 states:
            for _ in range(11):
                accelerator.save_state()
            self.assertTrue(not os.path.exists(os.path.join(tmpdir, 'checkpoints', 'checkpoint_0')))
            self.assertTrue(os.path.exists(os.path.join(tmpdir, 'checkpoints', 'checkpoint_9')))
            self.assertTrue(os.path.exists(os.path.join(tmpdir, 'checkpoints', 'checkpoint_10')))
    @require_cuda
    def test_map_location(self):
        cmd = ['torchrun', f"--nproc_per_node={torch.cuda.device_count()}", inspect.getfile(self.__class__)]
        execute_subprocess_async(cmd, env=os.environ.copy())
if __name__ == "__main__":
    savedir = '/tmp/accelerate/state_checkpointing'
    model = DummyModel()
    optimizer = torch.optim.Adam(params=model.parameters(), lr=1e-3)
    scheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=1, gamma=0.99)
    train_dataloader, valid_dataloader = dummy_dataloaders()
    project_config = ProjectConfiguration(automatic_checkpoint_naming=True)
    # Train baseline
    accelerator = Accelerator(project_dir=savedir, project_config=project_config, mixed_precision='no')
    if accelerator.process_index == 0:
        if os.path.exists(savedir):
            shutil.rmtree(savedir)
        os.makedirs(savedir)
    model, optimizer, train_dataloader, valid_dataloader, scheduler = accelerator.prepare(
        model, optimizer, train_dataloader, valid_dataloader, scheduler
    )
    model, optimizer = accelerator.prepare(model, optimizer)
train(3, model, train_dataloader, optimizer, accelerator, scheduler)
# Check that the intial optimizer is loaded on the GPU
    for group in optimizer.param_groups:
        param_device = group['params'][0].device
        break
    assert param_device.type == accelerator.device.type
    model = model.cpu()
accelerator.wait_for_everyone()
accelerator.save_state()
accelerator.wait_for_everyone()
# Check CPU state
accelerator.load_state(os.path.join(savedir, 'checkpoints', 'checkpoint_0'), map_location='cpu')
for group in optimizer.param_groups:
        param_device = group['params'][0].device
break
assert (
param_device.type == torch.device('cpu').type
), F"Loaded optimizer states did not match, expected to be loaded on the CPU but got {param_device}"
# Check device state
model.to(accelerator.device)
accelerator.load_state(os.path.join(savedir, 'checkpoints', 'checkpoint_0'), map_location='on_device')
for group in optimizer.param_groups:
        param_device = group['params'][0].device
break
assert (
param_device.type == accelerator.device.type
), F"Loaded optimizer states did not match, expected to be loaded on {accelerator.device} but got {param_device}"
# Check error
with pytest.raises(TypeError, match='Unsupported optimizer map location passed'):
accelerator.load_state(os.path.join(savedir, 'checkpoints', 'checkpoint_0'), map_location='invalid')
accelerator.wait_for_everyone()
if accelerator.process_index == 0:
shutil.rmtree(savedir)
accelerator.wait_for_everyone()
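# Hedged sketch of the minimal save/load cycle the tests above verify
# (the checkpoint path is illustrative):
def _demo_checkpoint_cycle(model, optimizer):
    accelerator = Accelerator()
    model, optimizer = accelerator.prepare(model, optimizer)
    accelerator.save_state("ckpts/step_0")  # saves model, optimizer and RNG states
    accelerator.load_state("ckpts/step_0")  # restores them in place
    return model, optimizer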
| 29 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
_import_structure = {
    "configuration_xlm": ["XLM_PRETRAINED_CONFIG_ARCHIVE_MAP", "XLMConfig", "XLMOnnxConfig"],
    "tokenization_xlm": ["XLMTokenizer"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_xlm"] = [
"""XLM_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""XLMForMultipleChoice""",
"""XLMForQuestionAnswering""",
"""XLMForQuestionAnsweringSimple""",
"""XLMForSequenceClassification""",
"""XLMForTokenClassification""",
"""XLMModel""",
"""XLMPreTrainedModel""",
"""XLMWithLMHeadModel""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_xlm"] = [
"""TF_XLM_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFXLMForMultipleChoice""",
"""TFXLMForQuestionAnsweringSimple""",
"""TFXLMForSequenceClassification""",
"""TFXLMForTokenClassification""",
"""TFXLMMainLayer""",
"""TFXLMModel""",
"""TFXLMPreTrainedModel""",
"""TFXLMWithLMHeadModel""",
]
if TYPE_CHECKING:
from .configuration_xlm import XLM_PRETRAINED_CONFIG_ARCHIVE_MAP, XLMConfig, XLMOnnxConfig
from .tokenization_xlm import XLMTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xlm import (
XLM_PRETRAINED_MODEL_ARCHIVE_LIST,
XLMForMultipleChoice,
XLMForQuestionAnswering,
XLMForQuestionAnsweringSimple,
XLMForSequenceClassification,
XLMForTokenClassification,
XLMModel,
XLMPreTrainedModel,
XLMWithLMHeadModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_xlm import (
TF_XLM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXLMForMultipleChoice,
TFXLMForQuestionAnsweringSimple,
TFXLMForSequenceClassification,
TFXLMForTokenClassification,
TFXLMMainLayer,
TFXLMModel,
TFXLMPreTrainedModel,
TFXLMWithLMHeadModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
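# Note (illustrative): after the sys.modules swap above, importing this
# package stays cheap; the first attribute access is what triggers the
# heavy import, e.g.
#   import transformers.models.xlm as xlm  # no torch/tf modeling code loaded yet
#   xlm.XLMModel                            # now modeling_xlm is imported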
| 368 |
'''simple docstring'''
def match_pattern(input_string: str, pattern: str) -> bool:
    """Regular-expression matching with support for '.' and '*' via bottom-up DP."""
    len_string = len(input_string) + 1
    len_pattern = len(pattern) + 1
    # dp is a 2d matrix where dp[i][j] denotes whether prefix string of
    # length i of input_string matches with prefix string of length j of
    # given pattern.
    # "dp" stands for dynamic programming.
    dp = [[0 for i in range(len_pattern)] for j in range(len_string)]
    # since string of zero length match pattern of zero length
    dp[0][0] = 1
    # since pattern of zero length will never match with string of non-zero length
    for i in range(1, len_string):
        dp[i][0] = 0
    # since string of zero length will match with pattern where there
    # is at least one * alternatively
    for j in range(1, len_pattern):
        dp[0][j] = dp[0][j - 2] if pattern[j - 1] == '*' else 0
    # now using bottom-up approach to find for all remaining lengths
    for i in range(1, len_string):
        for j in range(1, len_pattern):
            if input_string[i - 1] == pattern[j - 1] or pattern[j - 1] == ".":
                dp[i][j] = dp[i - 1][j - 1]
            elif pattern[j - 1] == "*":
                if dp[i][j - 2] == 1:
                    dp[i][j] = 1
                elif pattern[j - 2] in (input_string[i - 1], "."):
                    dp[i][j] = dp[i - 1][j]
                else:
                    dp[i][j] = 0
            else:
                dp[i][j] = 0
    return bool(dp[-1][-1])
if __name__ == "__main__":
import doctest
doctest.testmod()
# inputing the strings
# input_string = input("input a string :")
# pattern = input("input a pattern :")
__lowerCAmelCase = """aab"""
__lowerCAmelCase = """c*a*b"""
# using function to check whether given string matches the given pattern
if match_pattern(input_string, pattern):
print(f'''{input_string} matches the given pattern {pattern}''')
else:
print(f'''{input_string} does not match with the given pattern {pattern}''')
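# Hedged cross-check against Python's own regex engine; re.fullmatch
# mirrors this DP's whole-string semantics:
import re
for s, p in [("aab", "c*a*b"), ("mississippi", "mis*is*p*."), ("ab", ".*")]:
    assert match_pattern(s, p) == bool(re.fullmatch(p, s))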
| 5 | 0 |
from __future__ import annotations
from typing import Any
class ContainsLoopError(Exception):
    pass
class Node:
    def __init__(self, data: Any):
        self.data = data
        self.next_node = None
    def __iter__(self):
        node = self
        visited = []
        while node:
            if node in visited:
                raise ContainsLoopError
            visited.append(node)
            yield node.data
            node = node.next_node
    @property
    def has_loop(self) -> bool:
        try:
            list(self)
            return False
        except ContainsLoopError:
            return True
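def has_loop_floyd(head: Node) -> bool:
    # Hedged alternative sketch: Floyd's tortoise-and-hare detects a loop in
    # O(n) time and O(1) space, versus the O(n^2) visited-list check above.
    slow = fast = head
    while fast is not None and fast.next_node is not None:
        slow = slow.next_node            # one step
        fast = fast.next_node.next_node  # two steps
        if slow is fast:
            return True
    return False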
if __name__ == "__main__":
    root_node = Node(1)
    root_node.next_node = Node(2)
    root_node.next_node.next_node = Node(3)
    root_node.next_node.next_node.next_node = Node(4)
    print(root_node.has_loop)  # False
    root_node.next_node.next_node.next_node = root_node.next_node
    print(root_node.has_loop)  # True
    root_node = Node(5)
    root_node.next_node = Node(6)
    root_node.next_node.next_node = Node(5)
    root_node.next_node.next_node.next_node = Node(6)
    print(root_node.has_loop)  # False
    root_node = Node(1)
    print(root_node.has_loop)  # False
| 39 |
import argparse
import json
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import ViTImageProcessor, ViTMSNConfig, ViTMSNModel
from transformers.image_utils import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD
torch.set_grad_enabled(False)
def create_rename_keys(config, base_model=False):
    rename_keys = []
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((f"module.blocks.{i}.norm1.weight", f"vit.encoder.layer.{i}.layernorm_before.weight") )
rename_keys.append((f"module.blocks.{i}.norm1.bias", f"vit.encoder.layer.{i}.layernorm_before.bias") )
rename_keys.append(
(f"module.blocks.{i}.attn.proj.weight", f"vit.encoder.layer.{i}.attention.output.dense.weight") )
rename_keys.append((f"module.blocks.{i}.attn.proj.bias", f"vit.encoder.layer.{i}.attention.output.dense.bias") )
rename_keys.append((f"module.blocks.{i}.norm2.weight", f"vit.encoder.layer.{i}.layernorm_after.weight") )
rename_keys.append((f"module.blocks.{i}.norm2.bias", f"vit.encoder.layer.{i}.layernorm_after.bias") )
rename_keys.append((f"module.blocks.{i}.mlp.fc1.weight", f"vit.encoder.layer.{i}.intermediate.dense.weight") )
rename_keys.append((f"module.blocks.{i}.mlp.fc1.bias", f"vit.encoder.layer.{i}.intermediate.dense.bias") )
rename_keys.append((f"module.blocks.{i}.mlp.fc2.weight", f"vit.encoder.layer.{i}.output.dense.weight") )
rename_keys.append((f"module.blocks.{i}.mlp.fc2.bias", f"vit.encoder.layer.{i}.output.dense.bias") )
# projection layer + position embeddings
rename_keys.extend(
[
('''module.cls_token''', '''vit.embeddings.cls_token'''),
('''module.patch_embed.proj.weight''', '''vit.embeddings.patch_embeddings.projection.weight'''),
('''module.patch_embed.proj.bias''', '''vit.embeddings.patch_embeddings.projection.bias'''),
('''module.pos_embed''', '''vit.embeddings.position_embeddings'''),
] )
if base_model:
# layernorm + pooler
rename_keys.extend(
[
('''module.norm.weight''', '''layernorm.weight'''),
('''module.norm.bias''', '''layernorm.bias'''),
] )
# if just the base model, we should remove "vit" from all keys that start with "vit"
        rename_keys = [(pair[0], pair[1][4:]) if pair[1].startswith('''vit''') else pair for pair in rename_keys]
else:
# layernorm + classification head
rename_keys.extend(
[
('''norm.weight''', '''vit.layernorm.weight'''),
('''norm.bias''', '''vit.layernorm.bias'''),
('''head.weight''', '''classifier.weight'''),
('''head.bias''', '''classifier.bias'''),
] )
return rename_keys
def read_in_q_k_v(state_dict, config, base_model=False):
    for i in range(config.num_hidden_layers):
        if base_model:
            prefix = ""
        else:
            prefix = "vit."
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"module.blocks.{i}.attn.qkv.weight")
        in_proj_bias = state_dict.pop(f"module.blocks.{i}.attn.qkv.bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[
            : config.hidden_size, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.bias"] = in_proj_bias[: config.hidden_size]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.bias"] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.bias"] = in_proj_bias[-config.hidden_size :]
def remove_classification_head_(state_dict):
    ignore_keys = ['''head.weight''', '''head.bias''']
    for k in ignore_keys:
        state_dict.pop(k, None)
def remove_projection_head(state_dict):
    # projection head is used in the self-supervised pre-training in MSN,
    # for downstream task it's not needed.
    ignore_keys = [
'''module.fc.fc1.weight''',
'''module.fc.fc1.bias''',
'''module.fc.bn1.weight''',
'''module.fc.bn1.bias''',
'''module.fc.bn1.running_mean''',
'''module.fc.bn1.running_var''',
'''module.fc.bn1.num_batches_tracked''',
'''module.fc.fc2.weight''',
'''module.fc.fc2.bias''',
'''module.fc.bn2.weight''',
'''module.fc.bn2.bias''',
'''module.fc.bn2.running_mean''',
'''module.fc.bn2.running_var''',
'''module.fc.bn2.num_batches_tracked''',
'''module.fc.fc3.weight''',
'''module.fc.fc3.bias''',
]
    for k in ignore_keys:
        state_dict.pop(k, None)
def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val
def convert_vit_msn_checkpoint(checkpoint_url, pytorch_dump_folder_path):
    config = ViTMSNConfig()
    config.num_labels = 1000

    repo_id = "datasets/huggingface/label-files"
    filename = "imagenet-1k-id2label.json"
    id2label = json.load(open(hf_hub_download(repo_id, filename), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}

    if "s16" in checkpoint_url:
        config.hidden_size = 384
        config.intermediate_size = 1536
        config.num_attention_heads = 6
    elif "l16" in checkpoint_url:
        config.hidden_size = 1024
        config.intermediate_size = 4096
        config.num_hidden_layers = 24
        config.num_attention_heads = 16
        config.hidden_dropout_prob = 0.1
    elif "b4" in checkpoint_url:
        config.patch_size = 4
    elif "l7" in checkpoint_url:
        config.patch_size = 7
        config.hidden_size = 1024
        config.intermediate_size = 4096
        config.num_hidden_layers = 24
        config.num_attention_heads = 16
        config.hidden_dropout_prob = 0.1

    model = ViTMSNModel(config)

    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")["target_encoder"]

    image_processor = ViTImageProcessor(size=config.image_size)

    remove_projection_head(state_dict)
    rename_keys = create_rename_keys(config, base_model=True)

    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_q_k_v(state_dict, config, base_model=True)

    model.load_state_dict(state_dict)
    model.eval()

    url = "http://images.cocodataset.org/val2017/000000039769.jpg"

    image = Image.open(requests.get(url, stream=True).raw)
    # the exact normalization stats were lost in this copy; ImageNet defaults are assumed here
    image_processor = ViTImageProcessor(
        size=config.image_size, image_mean=[0.485, 0.456, 0.406], image_std=[0.229, 0.224, 0.225]
    )
    inputs = image_processor(images=image, return_tensors="pt")

    # forward pass
    torch.manual_seed(2)
    outputs = model(**inputs)
    last_hidden_state = outputs.last_hidden_state

    # The following Colab Notebook was used to generate these outputs:
    # https://colab.research.google.com/gist/sayakpaul/3672419a04f5997827503fd84079bdd1/scratchpad.ipynb
    if "s16" in checkpoint_url:
        expected_slice = torch.tensor([[-1.0915, -1.4876, -1.1809]])
    elif "b16" in checkpoint_url:
        expected_slice = torch.tensor([[14.2889, -18.9045, 11.7281]])
    elif "l16" in checkpoint_url:
        expected_slice = torch.tensor([[41.5028, -22.8681, 45.6475]])
    elif "b4" in checkpoint_url:
        expected_slice = torch.tensor([[-4.3868, 5.2932, -0.4137]])
    else:
        expected_slice = torch.tensor([[-0.1792, -0.6465, 2.4263]])

    # verify logits
    assert torch.allclose(last_hidden_state[:, 0, :3], expected_slice, atol=1e-4)

    print(f"Saving model to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)

    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--checkpoint_url",
        default="https://dl.fbaipublicfiles.com/msn/vits16_800ep.pth.tar",
        type=str,
        help="URL of the checkpoint you'd like to convert.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
    )

    args = parser.parse_args()
    convert_vit_msn_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path)
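# Example invocation (added for illustration; assumes the script is saved as
# convert_vit_msn_to_pytorch.py, and the output folder name is arbitrary):
#
#   python convert_vit_msn_to_pytorch.py \
#       --checkpoint_url https://dl.fbaipublicfiles.com/msn/vits16_800ep.pth.tar \
#       --pytorch_dump_folder_path ./vit-msn-small
#
# The dumped folder can then be reloaded with
# `ViTMSNModel.from_pretrained("./vit-msn-small")` and
# `ViTImageProcessor.from_pretrained("./vit-msn-small")`.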
| 216 | 0 |
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

CONVBERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"""YituTech/conv-bert-base""": """https://huggingface.co/YituTech/conv-bert-base/resolve/main/config.json""",
"""YituTech/conv-bert-medium-small""": (
"""https://huggingface.co/YituTech/conv-bert-medium-small/resolve/main/config.json"""
),
"""YituTech/conv-bert-small""": """https://huggingface.co/YituTech/conv-bert-small/resolve/main/config.json""",
# See all ConvBERT models at https://huggingface.co/models?filter=convbert
}
class ConvBertConfig(PretrainedConfig):
    model_type = "convbert"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        embedding_size=768,
        head_ratio=2,
        conv_kernel_size=9,
        num_groups=1,
        classifier_dropout=None,
        **kwargs,
    ):
        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            **kwargs,
        )

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.embedding_size = embedding_size
        self.head_ratio = head_ratio
        self.conv_kernel_size = conv_kernel_size
        self.num_groups = num_groups
        self.classifier_dropout = classifier_dropout


class ConvBertOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
                ("token_type_ids", dynamic_axis),
            ]
        )
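# A short usage sketch (added; not part of the original module). Like any
# PretrainedConfig subclass, the config can be built with defaults or overrides:
#
#   config = ConvBertConfig(conv_kernel_size=7, num_hidden_layers=6)
#   assert config.model_type == "convbert"
#   config.save_pretrained("./my-convbert-config")  # writes config.json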
| 360 |
"""simple docstring"""
from __future__ import annotations
from itertools import permutations
from random import randint
from timeit import repeat
def make_dataset() -> tuple[list[int], int]:
    arr = [randint(-1000, 1000) for i in range(10)]
    r = randint(-5000, 5000)
    return (arr, r)


dataset = make_dataset()


def triplet_sum1(arr: list[int], target: int) -> tuple[int, ...]:
    for triplet in permutations(arr, 3):
        if sum(triplet) == target:
            return tuple(sorted(triplet))
    return (0, 0, 0)


def triplet_sum2(arr: list[int], target: int) -> tuple[int, int, int]:
    arr.sort()
    n = len(arr)
    for i in range(n - 1):
        left, right = i + 1, n - 1
        while left < right:
            if arr[i] + arr[left] + arr[right] == target:
                return (arr[i], arr[left], arr[right])
            elif arr[i] + arr[left] + arr[right] < target:
                left += 1
            elif arr[i] + arr[left] + arr[right] > target:
                right -= 1
    return (0, 0, 0)


def solution_times() -> tuple[float, float]:
    setup_code = """
from __main__ import dataset, triplet_sum1, triplet_sum2
"""
    test_code1 = """
triplet_sum1(*dataset)
"""
    test_code2 = """
triplet_sum2(*dataset)
"""
    times1 = repeat(setup=setup_code, stmt=test_code1, repeat=5, number=10000)
    times2 = repeat(setup=setup_code, stmt=test_code2, repeat=5, number=10000)
    return (min(times1), min(times2))
if __name__ == "__main__":
from doctest import testmod
testmod()
times = solution_times()
print(f"The time for naive implementation is {times[0]}.")
print(f"The time for optimized implementation is {times[1]}.")
| 68 | 0 |
import functools
def min_distance_up_bottom(word1: str, word2: str) -> int:
    """
    Find the minimum edit (Levenshtein) distance between two words
    using a top-down memoized recursion.
    """
    len_word1 = len(word1)
    len_word2 = len(word2)

    @functools.cache
    def min_distance(index1: int, index2: int) -> int:
        # if first word index is overflow - delete all from the second word
        if index1 >= len_word1:
            return len_word2 - index2
        # if second word index is overflow - delete all from the first word
        if index2 >= len_word2:
            return len_word1 - index1
        diff = int(word1[index1] != word2[index2])  # current letters not identical
        return min(
            1 + min_distance(index1 + 1, index2),
            1 + min_distance(index1, index2 + 1),
            diff + min_distance(index1 + 1, index2 + 1),
        )

    return min_distance(0, 0)
if __name__ == "__main__":
import doctest
doctest.testmod()
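# Usage examples (added for illustration):
#
#   >>> min_distance_up_bottom("intention", "execution")
#   5
#   >>> min_distance_up_bottom("", "abc")
#   3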
| 18 | """simple docstring"""
import tempfile
import numpy as np
import torch
from transformers import AutoTokenizer, T5EncoderModel

from diffusers import DDPMScheduler, UNet2DConditionModel
from diffusers.models.attention_processor import AttnAddedKVProcessor
from diffusers.pipelines.deepfloyd_if import IFWatermarker
from diffusers.utils.testing_utils import torch_device
from ..test_pipelines_common import to_np
class IFPipelineTesterMixin:
    def _get_dummy_components(self):
        torch.manual_seed(0)
        text_encoder = T5EncoderModel.from_pretrained("hf-internal-testing/tiny-random-t5")

        torch.manual_seed(0)
        tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-t5")

        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            sample_size=32,
            layers_per_block=1,
            block_out_channels=[32, 64],
            down_block_types=["ResnetDownsampleBlock2D", "SimpleCrossAttnDownBlock2D"],
            mid_block_type="UNetMidBlock2DSimpleCrossAttn",
            up_block_types=["SimpleCrossAttnUpBlock2D", "ResnetUpsampleBlock2D"],
            in_channels=3,
            out_channels=6,
            cross_attention_dim=32,
            encoder_hid_dim=32,
            attention_head_dim=8,
            addition_embed_type="text",
            addition_embed_type_num_heads=2,
            cross_attention_norm="group_norm",
            resnet_time_scale_shift="scale_shift",
            act_fn="gelu",
        )
        unet.set_attn_processor(AttnAddedKVProcessor())  # For reproducibility tests

        torch.manual_seed(0)
        scheduler = DDPMScheduler(
            num_train_timesteps=1000,
            beta_schedule="squaredcos_cap_v2",
            beta_start=0.0001,
            beta_end=0.02,
            thresholding=True,
            dynamic_thresholding_ratio=0.95,
            sample_max_value=1.0,
            prediction_type="epsilon",
            variance_type="learned_range",
        )

        torch.manual_seed(0)
        watermarker = IFWatermarker()

        return {
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "unet": unet,
            "scheduler": scheduler,
            "watermarker": watermarker,
            "safety_checker": None,
            "feature_extractor": None,
        }
    def _get_superresolution_dummy_components(self):
        torch.manual_seed(0)
        text_encoder = T5EncoderModel.from_pretrained("hf-internal-testing/tiny-random-t5")

        torch.manual_seed(0)
        tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-t5")

        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            sample_size=32,
            layers_per_block=[1, 2],
            block_out_channels=[32, 64],
            down_block_types=["ResnetDownsampleBlock2D", "SimpleCrossAttnDownBlock2D"],
            mid_block_type="UNetMidBlock2DSimpleCrossAttn",
            up_block_types=["SimpleCrossAttnUpBlock2D", "ResnetUpsampleBlock2D"],
            in_channels=6,
            out_channels=6,
            cross_attention_dim=32,
            encoder_hid_dim=32,
            attention_head_dim=8,
            addition_embed_type="text",
            addition_embed_type_num_heads=2,
            cross_attention_norm="group_norm",
            resnet_time_scale_shift="scale_shift",
            act_fn="gelu",
            class_embed_type="timestep",
            mid_block_scale_factor=1.414,
            time_embedding_act_fn="gelu",
            time_embedding_dim=32,
        )
        unet.set_attn_processor(AttnAddedKVProcessor())  # For reproducibility tests

        torch.manual_seed(0)
        scheduler = DDPMScheduler(
            num_train_timesteps=1000,
            beta_schedule="squaredcos_cap_v2",
            beta_start=0.0001,
            beta_end=0.02,
            thresholding=True,
            dynamic_thresholding_ratio=0.95,
            sample_max_value=1.0,
            prediction_type="epsilon",
            variance_type="learned_range",
        )

        torch.manual_seed(0)
        image_noising_scheduler = DDPMScheduler(
            num_train_timesteps=1000,
            beta_schedule="squaredcos_cap_v2",
            beta_start=0.0001,
            beta_end=0.02,
        )

        torch.manual_seed(0)
        watermarker = IFWatermarker()

        return {
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "unet": unet,
            "scheduler": scheduler,
            "image_noising_scheduler": image_noising_scheduler,
            "watermarker": watermarker,
            "safety_checker": None,
            "feature_extractor": None,
        }
    def _test_save_load_optional_components(self):
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(torch_device)

        prompt = inputs["prompt"]
        generator = inputs["generator"]
        num_inference_steps = inputs["num_inference_steps"]
        output_type = inputs["output_type"]

        if "image" in inputs:
            image = inputs["image"]
        else:
            image = None

        if "mask_image" in inputs:
            mask_image = inputs["mask_image"]
        else:
            mask_image = None

        if "original_image" in inputs:
            original_image = inputs["original_image"]
        else:
            original_image = None

        prompt_embeds, negative_prompt_embeds = pipe.encode_prompt(prompt)

        # inputs with prompt converted to embeddings
        inputs = {
            "prompt_embeds": prompt_embeds,
            "negative_prompt_embeds": negative_prompt_embeds,
            "generator": generator,
            "num_inference_steps": num_inference_steps,
            "output_type": output_type,
        }

        if image is not None:
            inputs["image"] = image

        if mask_image is not None:
            inputs["mask_image"] = mask_image

        if original_image is not None:
            inputs["original_image"] = original_image

        # set all optional components to None
        for optional_component in pipe._optional_components:
            setattr(pipe, optional_component, None)

        output = pipe(**inputs)[0]

        with tempfile.TemporaryDirectory() as tmpdir:
            pipe.save_pretrained(tmpdir)
            pipe_loaded = self.pipeline_class.from_pretrained(tmpdir)
            pipe_loaded.to(torch_device)
            pipe_loaded.set_progress_bar_config(disable=None)

        pipe_loaded.unet.set_attn_processor(AttnAddedKVProcessor())  # For reproducibility tests

        for optional_component in pipe._optional_components:
            self.assertTrue(
                getattr(pipe_loaded, optional_component) is None,
                f"`{optional_component}` did not stay set to None after loading.",
            )

        inputs = self.get_dummy_inputs(torch_device)

        generator = inputs["generator"]
        num_inference_steps = inputs["num_inference_steps"]
        output_type = inputs["output_type"]

        # inputs with prompt converted to embeddings
        inputs = {
            "prompt_embeds": prompt_embeds,
            "negative_prompt_embeds": negative_prompt_embeds,
            "generator": generator,
            "num_inference_steps": num_inference_steps,
            "output_type": output_type,
        }

        if image is not None:
            inputs["image"] = image

        if mask_image is not None:
            inputs["mask_image"] = mask_image

        if original_image is not None:
            inputs["original_image"] = original_image

        output_loaded = pipe_loaded(**inputs)[0]

        max_diff = np.abs(to_np(output) - to_np(output_loaded)).max()
        self.assertLess(max_diff, 1e-4)
    def _test_save_load_local(self):
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(torch_device)
        output = pipe(**inputs)[0]

        with tempfile.TemporaryDirectory() as tmpdir:
            pipe.save_pretrained(tmpdir)
            pipe_loaded = self.pipeline_class.from_pretrained(tmpdir)
            pipe_loaded.to(torch_device)
            pipe_loaded.set_progress_bar_config(disable=None)

        pipe_loaded.unet.set_attn_processor(AttnAddedKVProcessor())  # For reproducibility tests

        inputs = self.get_dummy_inputs(torch_device)
        output_loaded = pipe_loaded(**inputs)[0]

        max_diff = np.abs(to_np(output) - to_np(output_loaded)).max()
        self.assertLess(max_diff, 1e-4)
| 197 | 0 |
'''simple docstring'''
import argparse
import os.path as osp
import re
import torch
from safetensors.torch import load_file, save_file
# =================#
# UNet Conversion #
# =================#
unet_conversion_map = [
# (stable-diffusion, HF Diffusers)
('time_embed.0.weight', 'time_embedding.linear_1.weight'),
('time_embed.0.bias', 'time_embedding.linear_1.bias'),
('time_embed.2.weight', 'time_embedding.linear_2.weight'),
('time_embed.2.bias', 'time_embedding.linear_2.bias'),
('input_blocks.0.0.weight', 'conv_in.weight'),
('input_blocks.0.0.bias', 'conv_in.bias'),
('out.0.weight', 'conv_norm_out.weight'),
('out.0.bias', 'conv_norm_out.bias'),
('out.2.weight', 'conv_out.weight'),
('out.2.bias', 'conv_out.bias'),
]
unet_conversion_map_resnet = [
# (stable-diffusion, HF Diffusers)
('in_layers.0', 'norm1'),
('in_layers.2', 'conv1'),
('out_layers.0', 'norm2'),
('out_layers.3', 'conv2'),
('emb_layers.1', 'time_emb_proj'),
('skip_connection', 'conv_shortcut'),
]
unet_conversion_map_layer = []
# hardcoded number of downblocks and resnets/attentions...
# would need smarter logic for other networks.
for i in range(4):
    # loop over downblocks/upblocks

    for j in range(2):
        # loop over resnets/attentions for downblocks
        hf_down_res_prefix = f"down_blocks.{i}.resnets.{j}."
        sd_down_res_prefix = f"input_blocks.{3*i + j + 1}.0."
        unet_conversion_map_layer.append((sd_down_res_prefix, hf_down_res_prefix))

        if i < 3:
            # no attention layers in down_blocks.3
            hf_down_atn_prefix = f"down_blocks.{i}.attentions.{j}."
            sd_down_atn_prefix = f"input_blocks.{3*i + j + 1}.1."
            unet_conversion_map_layer.append((sd_down_atn_prefix, hf_down_atn_prefix))

    for j in range(3):
        # loop over resnets/attentions for upblocks
        hf_up_res_prefix = f"up_blocks.{i}.resnets.{j}."
        sd_up_res_prefix = f"output_blocks.{3*i + j}.0."
        unet_conversion_map_layer.append((sd_up_res_prefix, hf_up_res_prefix))

        if i > 0:
            # no attention layers in up_blocks.0
            hf_up_atn_prefix = f"up_blocks.{i}.attentions.{j}."
            sd_up_atn_prefix = f"output_blocks.{3*i + j}.1."
            unet_conversion_map_layer.append((sd_up_atn_prefix, hf_up_atn_prefix))

    if i < 3:
        # no downsample in down_blocks.3
        hf_downsample_prefix = f"down_blocks.{i}.downsamplers.0.conv."
        sd_downsample_prefix = f"input_blocks.{3*(i+1)}.0.op."
        unet_conversion_map_layer.append((sd_downsample_prefix, hf_downsample_prefix))

        # no upsample in up_blocks.3
        hf_upsample_prefix = f"up_blocks.{i}.upsamplers.0."
        sd_upsample_prefix = f"output_blocks.{3*i + 2}.{1 if i == 0 else 2}."
        unet_conversion_map_layer.append((sd_upsample_prefix, hf_upsample_prefix))

hf_mid_atn_prefix = "mid_block.attentions.0."
sd_mid_atn_prefix = "middle_block.1."
unet_conversion_map_layer.append((sd_mid_atn_prefix, hf_mid_atn_prefix))

for j in range(2):
    hf_mid_res_prefix = f"mid_block.resnets.{j}."
    sd_mid_res_prefix = f"middle_block.{2*j}."
    unet_conversion_map_layer.append((sd_mid_res_prefix, hf_mid_res_prefix))
def convert_unet_state_dict(unet_state_dict):
    # buyer beware: this is a *brittle* function,
    # and correct output requires that all of these pieces interact in
    # the exact order in which I have arranged them.
    mapping = {k: k for k in unet_state_dict.keys()}
    for sd_name, hf_name in unet_conversion_map:
        mapping[hf_name] = sd_name
    for k, v in mapping.items():
        if "resnets" in k:
            for sd_part, hf_part in unet_conversion_map_resnet:
                v = v.replace(hf_part, sd_part)
            mapping[k] = v
    for k, v in mapping.items():
        for sd_part, hf_part in unet_conversion_map_layer:
            v = v.replace(hf_part, sd_part)
        mapping[k] = v
    new_state_dict = {v: unet_state_dict[k] for k, v in mapping.items()}
    return new_state_dict
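# Illustration (added; not in the original script) of the renaming direction:
# HF Diffusers keys (the right side of each mapping pair) are translated back to
# the original Stable Diffusion checkpoint keys, e.g.
#   "time_embedding.linear_1.weight"        -> "time_embed.0.weight"
#   "down_blocks.0.resnets.0.norm1.weight"  -> "input_blocks.1.0.in_layers.0.weight"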
# ================#
# VAE Conversion #
# ================#
vae_conversion_map = [
# (stable-diffusion, HF Diffusers)
('nin_shortcut', 'conv_shortcut'),
('norm_out', 'conv_norm_out'),
('mid.attn_1.', 'mid_block.attentions.0.'),
]
for i in range(4):
    # down_blocks have two resnets
    for j in range(2):
        hf_down_prefix = f"encoder.down_blocks.{i}.resnets.{j}."
        sd_down_prefix = f"encoder.down.{i}.block.{j}."
        vae_conversion_map.append((sd_down_prefix, hf_down_prefix))

    if i < 3:
        hf_downsample_prefix = f"down_blocks.{i}.downsamplers.0."
        sd_downsample_prefix = f"down.{i}.downsample."
        vae_conversion_map.append((sd_downsample_prefix, hf_downsample_prefix))

        hf_upsample_prefix = f"up_blocks.{i}.upsamplers.0."
        sd_upsample_prefix = f"up.{3-i}.upsample."
        vae_conversion_map.append((sd_upsample_prefix, hf_upsample_prefix))

    # up_blocks have three resnets
    # also, up blocks in hf are numbered in reverse from sd
    for j in range(3):
        hf_up_prefix = f"decoder.up_blocks.{i}.resnets.{j}."
        sd_up_prefix = f"decoder.up.{3-i}.block.{j}."
        vae_conversion_map.append((sd_up_prefix, hf_up_prefix))

# this part accounts for mid blocks in both the encoder and the decoder
for i in range(2):
    hf_mid_res_prefix = f"mid_block.resnets.{i}."
    sd_mid_res_prefix = f"mid.block_{i+1}."
    vae_conversion_map.append((sd_mid_res_prefix, hf_mid_res_prefix))
vae_conversion_map_attn = [
# (stable-diffusion, HF Diffusers)
('norm.', 'group_norm.'),
('q.', 'query.'),
('k.', 'key.'),
('v.', 'value.'),
('proj_out.', 'proj_attn.'),
]
def reshape_weight_for_sd(w):
    # convert HF linear weights to SD conv2d weights
    return w.reshape(*w.shape, 1, 1)
def convert_vae_state_dict(vae_state_dict):
    mapping = {k: k for k in vae_state_dict.keys()}
    for k, v in mapping.items():
        for sd_part, hf_part in vae_conversion_map:
            v = v.replace(hf_part, sd_part)
        mapping[k] = v
    for k, v in mapping.items():
        if "attentions" in k:
            for sd_part, hf_part in vae_conversion_map_attn:
                v = v.replace(hf_part, sd_part)
            mapping[k] = v
    new_state_dict = {v: vae_state_dict[k] for k, v in mapping.items()}
    weights_to_convert = ["q", "k", "v", "proj_out"]
    for k, v in new_state_dict.items():
        for weight_name in weights_to_convert:
            if f"mid.attn_1.{weight_name}.weight" in k:
                print(f"Reshaping {k} for SD format")
                new_state_dict[k] = reshape_weight_for_sd(v)
    return new_state_dict
# =========================#
# Text Encoder Conversion #
# =========================#
textenc_conversion_lst = [
# (stable-diffusion, HF Diffusers)
('resblocks.', 'text_model.encoder.layers.'),
('ln_1', 'layer_norm1'),
('ln_2', 'layer_norm2'),
('.c_fc.', '.fc1.'),
('.c_proj.', '.fc2.'),
('.attn', '.self_attn'),
('ln_final.', 'transformer.text_model.final_layer_norm.'),
('token_embedding.weight', 'transformer.text_model.embeddings.token_embedding.weight'),
('positional_embedding', 'transformer.text_model.embeddings.position_embedding.weight'),
]
protected = {re.escape(x[1]): x[0] for x in textenc_conversion_lst}
textenc_pattern = re.compile("|".join(protected.keys()))
# Ordering is from https://github.com/pytorch/pytorch/blob/master/test/cpp/api/modules.cpp
code2idx = {"q": 0, "k": 1, "v": 2}
def convert_text_enc_state_dict_v20(text_enc_dict):
    new_state_dict = {}
    capture_qkv_weight = {}
    capture_qkv_bias = {}
    for k, v in text_enc_dict.items():
        if (
            k.endswith(".self_attn.q_proj.weight")
            or k.endswith(".self_attn.k_proj.weight")
            or k.endswith(".self_attn.v_proj.weight")
        ):
            k_pre = k[: -len(".q_proj.weight")]
            k_code = k[-len("q_proj.weight")]
            if k_pre not in capture_qkv_weight:
                capture_qkv_weight[k_pre] = [None, None, None]
            capture_qkv_weight[k_pre][code2idx[k_code]] = v
            continue

        if (
            k.endswith(".self_attn.q_proj.bias")
            or k.endswith(".self_attn.k_proj.bias")
            or k.endswith(".self_attn.v_proj.bias")
        ):
            k_pre = k[: -len(".q_proj.bias")]
            k_code = k[-len("q_proj.bias")]
            if k_pre not in capture_qkv_bias:
                capture_qkv_bias[k_pre] = [None, None, None]
            capture_qkv_bias[k_pre][code2idx[k_code]] = v
            continue

        relabelled_key = textenc_pattern.sub(lambda m: protected[re.escape(m.group(0))], k)
        new_state_dict[relabelled_key] = v

    for k_pre, tensors in capture_qkv_weight.items():
        if None in tensors:
            raise Exception("CORRUPTED MODEL: one of the q-k-v values for the text encoder was missing")
        relabelled_key = textenc_pattern.sub(lambda m: protected[re.escape(m.group(0))], k_pre)
        new_state_dict[relabelled_key + ".in_proj_weight"] = torch.cat(tensors)

    for k_pre, tensors in capture_qkv_bias.items():
        if None in tensors:
            raise Exception("CORRUPTED MODEL: one of the q-k-v values for the text encoder was missing")
        relabelled_key = textenc_pattern.sub(lambda m: protected[re.escape(m.group(0))], k_pre)
        new_state_dict[relabelled_key + ".in_proj_bias"] = torch.cat(tensors)

    return new_state_dict
def convert_text_enc_state_dict(text_enc_dict):
    return text_enc_dict
if __name__ == "__main__":
    parser = argparse.ArgumentParser()

    parser.add_argument("--model_path", default=None, type=str, required=True, help="Path to the model to convert.")
    parser.add_argument("--checkpoint_path", default=None, type=str, required=True, help="Path to the output model.")
    parser.add_argument("--half", action="store_true", help="Save weights in half precision.")
    parser.add_argument(
        "--use_safetensors", action="store_true", help="Save weights use safetensors, default is ckpt."
    )

    args = parser.parse_args()

    assert args.model_path is not None, "Must provide a model path!"

    assert args.checkpoint_path is not None, "Must provide a checkpoint path!"

    # Path for safetensors
    unet_path = osp.join(args.model_path, "unet", "diffusion_pytorch_model.safetensors")
    vae_path = osp.join(args.model_path, "vae", "diffusion_pytorch_model.safetensors")
    text_enc_path = osp.join(args.model_path, "text_encoder", "model.safetensors")

    # Load models from safetensors if it exists, if it doesn't pytorch
    if osp.exists(unet_path):
        unet_state_dict = load_file(unet_path, device="cpu")
    else:
        unet_path = osp.join(args.model_path, "unet", "diffusion_pytorch_model.bin")
        unet_state_dict = torch.load(unet_path, map_location="cpu")

    if osp.exists(vae_path):
        vae_state_dict = load_file(vae_path, device="cpu")
    else:
        vae_path = osp.join(args.model_path, "vae", "diffusion_pytorch_model.bin")
        vae_state_dict = torch.load(vae_path, map_location="cpu")

    if osp.exists(text_enc_path):
        text_enc_dict = load_file(text_enc_path, device="cpu")
    else:
        text_enc_path = osp.join(args.model_path, "text_encoder", "pytorch_model.bin")
        text_enc_dict = torch.load(text_enc_path, map_location="cpu")

    # Convert the UNet model
    unet_state_dict = convert_unet_state_dict(unet_state_dict)
    unet_state_dict = {"model.diffusion_model." + k: v for k, v in unet_state_dict.items()}

    # Convert the VAE model
    vae_state_dict = convert_vae_state_dict(vae_state_dict)
    vae_state_dict = {"first_stage_model." + k: v for k, v in vae_state_dict.items()}

    # Easiest way to identify v2.0 model seems to be that the text encoder (OpenCLIP) is deeper
    is_v20_model = "text_model.encoder.layers.22.layer_norm2.bias" in text_enc_dict

    if is_v20_model:
        # Need to add the tag 'transformer' in advance so we can knock it out from the final layer-norm
        text_enc_dict = {"transformer." + k: v for k, v in text_enc_dict.items()}
        text_enc_dict = convert_text_enc_state_dict_v20(text_enc_dict)
        text_enc_dict = {"cond_stage_model.model." + k: v for k, v in text_enc_dict.items()}
    else:
        text_enc_dict = convert_text_enc_state_dict(text_enc_dict)
        text_enc_dict = {"cond_stage_model.transformer." + k: v for k, v in text_enc_dict.items()}

    # Put together new checkpoint
    state_dict = {**unet_state_dict, **vae_state_dict, **text_enc_dict}
    if args.half:
        state_dict = {k: v.half() for k, v in state_dict.items()}

    if args.use_safetensors:
        save_file(state_dict, args.checkpoint_path)
    else:
        state_dict = {"state_dict": state_dict}
torch.save(state_dict, args.checkpoint_path) | 187 |
'''simple docstring'''
def circle_sort(collection: list) -> list:
    """Sort the collection in place with circle sort and return it."""
    if len(collection) < 2:
        return collection

    def circle_sort_util(collection: list, low: int, high: int) -> bool:
        swapped = False

        if low == high:
            return swapped

        left = low
        right = high

        while left < right:
            if collection[left] > collection[right]:
                collection[left], collection[right] = (
                    collection[right],
                    collection[left],
                )
                swapped = True

            left += 1
            right -= 1

        if left == right and collection[left] > collection[right + 1]:
            collection[left], collection[right + 1] = (
                collection[right + 1],
                collection[left],
            )
            swapped = True

        mid = low + int((high - low) / 2)
        left_swap = circle_sort_util(collection, low, mid)
        right_swap = circle_sort_util(collection, mid + 1, high)

        return swapped or left_swap or right_swap

    is_not_sorted = True

    while is_not_sorted is True:
        is_not_sorted = circle_sort_util(collection, 0, len(collection) - 1)

    return collection
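# Examples (added for illustration):
#
#   >>> circle_sort([6, 3, 8, 1])
#   [1, 3, 6, 8]
#   >>> circle_sort([-4, 0, -2])
#   [-4, -2, 0]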
if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
print(circle_sort(unsorted)) | 187 | 1 |
"""simple docstring"""
import argparse
import math
import traceback
import dateutil.parser as date_parser
import requests
def extract_time_from_single_job(job):
    """Extract time info from a single job in a GitHub Actions workflow run"""

    job_info = {}

    start = job["started_at"]
    end = job["completed_at"]

    start_datetime = date_parser.parse(start)
    end_datetime = date_parser.parse(end)

    duration_in_min = round((end_datetime - start_datetime).total_seconds() / 60.0)

    job_info["started_at"] = start
    job_info["completed_at"] = end
    job_info["duration"] = duration_in_min

    return job_info
def get_job_time(workflow_run_id, token=None):
    """Extract time info for all the jobs in a GitHub Actions workflow run"""

    headers = None
    if token is not None:
        headers = {"Accept": "application/vnd.github+json", "Authorization": f"Bearer {token}"}

    url = f"https://api.github.com/repos/huggingface/transformers/actions/runs/{workflow_run_id}/jobs?per_page=100"
    result = requests.get(url, headers=headers).json()
    job_time = {}

    try:
        job_time.update({job["name"]: extract_time_from_single_job(job) for job in result["jobs"]})
        pages_to_iterate_over = math.ceil((result["total_count"] - 100) / 100)

        for i in range(pages_to_iterate_over):
            result = requests.get(url + f"&page={i + 2}", headers=headers).json()
            job_time.update({job["name"]: extract_time_from_single_job(job) for job in result["jobs"]})

        return job_time
    except Exception:
        print(f"Unknown error, could not fetch links:\n{traceback.format_exc()}")

    return {}
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument("--workflow_run_id", type=str, required=True, help="A GitHub Actions workflow run id.")
    args = parser.parse_args()

    job_time = get_job_time(args.workflow_run_id)
    job_time = dict(sorted(job_time.items(), key=lambda item: item[1]["duration"], reverse=True))

    for k, v in job_time.items():
        print(f"{k}: {v['duration']}")
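# Example invocation (added; assumes this file is saved as get_github_job_time.py,
# and the run id below is a placeholder):
#
#   python get_github_job_time.py --workflow_run_id 1234567890
#
# This prints each job name with its duration in minutes, longest job first.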
| 179 |
"""simple docstring"""
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from ..models.speecht5 import SpeechT5ForTextToSpeech, SpeechT5HifiGan, SpeechT5Processor
from ..utils import is_datasets_available
from .base import PipelineTool
if is_datasets_available():
from datasets import load_dataset
class TextToSpeechTool(PipelineTool):
    default_checkpoint = "microsoft/speecht5_tts"
    description = (
        "This is a tool that reads an English text out loud. It takes an input named `text` which should contain the "
        "text to read (in English) and returns a waveform object containing the sound."
    )
    name = "text_reader"
    pre_processor_class = SpeechT5Processor
    model_class = SpeechT5ForTextToSpeech
    post_processor_class = SpeechT5HifiGan

    inputs = ["text"]
    outputs = ["audio"]

    def setup(self):
        if self.post_processor is None:
            self.post_processor = "microsoft/speecht5_hifigan"
        super().setup()

    def encode(self, text, speaker_embeddings=None):
        inputs = self.pre_processor(text=text, return_tensors="pt", truncation=True)

        if speaker_embeddings is None:
            if not is_datasets_available():
                raise ImportError("Datasets needs to be installed if not passing speaker embeddings.")

            embeddings_dataset = load_dataset("Matthijs/cmu-arctic-xvectors", split="validation")
            speaker_embeddings = torch.tensor(embeddings_dataset[7305]["xvector"]).unsqueeze(0)

        return {"input_ids": inputs["input_ids"], "speaker_embeddings": speaker_embeddings}

    def forward(self, inputs):
        with torch.no_grad():
            return self.model.generate_speech(**inputs)

    def decode(self, outputs):
        with torch.no_grad():
            return self.post_processor(outputs).cpu().detach()
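# A minimal usage sketch (added; not part of the original module). Calling the tool
# runs the encode -> forward -> decode chain, fetching checkpoints on first use:
#
#   tool = TextToSpeechTool()
#   waveform = tool("Hello world")  # torch tensor containing the synthesized audio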
| 179 | 1 |
import warnings
from .generation import TFGenerationMixin
class TFGenerationMixin(TFGenerationMixin):
    # warning at import time
    warnings.warn(
        "Importing `TFGenerationMixin` from `src/transformers/generation_tf_utils.py` is deprecated and will "
        "be removed in Transformers v5. Import as `from transformers import TFGenerationMixin` instead.",
        FutureWarning,
    )
| 362 |
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
'''configuration_xmod''': [
'''XMOD_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''XmodConfig''',
'''XmodOnnxConfig''',
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_xmod"] = [
'''XMOD_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''XmodForCausalLM''',
'''XmodForMaskedLM''',
'''XmodForMultipleChoice''',
'''XmodForQuestionAnswering''',
'''XmodForSequenceClassification''',
'''XmodForTokenClassification''',
'''XmodModel''',
'''XmodPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_xmod import XMOD_PRETRAINED_CONFIG_ARCHIVE_MAP, XmodConfig, XmodOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xmod import (
XMOD_PRETRAINED_MODEL_ARCHIVE_LIST,
XmodForCausalLM,
XmodForMaskedLM,
XmodForMultipleChoice,
XmodForQuestionAnswering,
XmodForSequenceClassification,
XmodForTokenClassification,
XmodModel,
XmodPreTrainedModel,
)
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 70 | 0 |
import os
import unittest
from transformers.models.cpmant.tokenization_cpmant import VOCAB_FILES_NAMES, CpmAntTokenizer
from transformers.testing_utils import require_jieba, tooslow
from ...test_tokenization_common import TokenizerTesterMixin
@require_jieba
class CpmAntTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = CpmAntTokenizer
    test_rust_tokenizer = False

    def setUp(self):
        super().setUp()

        vocab_tokens = [
            "<d>",
            "</d>",
            "<s>",
            "</s>",
            "</_>",
            "<unk>",
            "<pad>",
            "</n>",
            "我",
            "是",
            "C",
            "P",
            "M",
            "A",
            "n",
            "t",
        ]
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))

    @tooslow
    def test_pre_tokenization(self):
        tokenizer = CpmAntTokenizer.from_pretrained("openbmb/cpm-ant-10b")
        texts = "今天天气真好!"
        jieba_tokens = ["今天", "天气", "真", "好", "!"]
        tokens = tokenizer.tokenize(texts)
        self.assertListEqual(tokens, jieba_tokens)
        normalized_text = "今天天气真好!"
        input_tokens = [tokenizer.bos_token] + tokens

        input_jieba_tokens = [6, 9802, 14962, 2082, 831, 244]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_jieba_tokens)

        reconstructed_text = tokenizer.decode(input_jieba_tokens)
        self.assertEqual(reconstructed_text, normalized_text)
| 299 |
def sum_of_divisors(input_num: int) -> int:
    # the original function name was lost in this copy; a descriptive one is used here
    """Return the sum of all proper divisors of a positive integer."""
    if not isinstance(input_num, int):
        raise ValueError("Input must be an integer")
    if input_num <= 0:
        raise ValueError("Input must be positive")
    return sum(
        divisor for divisor in range(1, input_num // 2 + 1) if input_num % divisor == 0
    )
if __name__ == "__main__":
import doctest
doctest.testmod()
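# Example (added for illustration): 28 is a perfect number, so the sum of its
# proper divisors equals the number itself.
#
#   >>> sum_of_divisors(28)
#   28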
| 299 | 1 |
def equation(x: float) -> float:
    return 10 - x * x


def bisection(a: float, b: float) -> float:
    # Bolzano's theorem: a sign change is needed for a root to exist in [a, b]
    if equation(a) * equation(b) >= 0:
        raise ValueError("Wrong space!")

    c = a
    while (b - a) >= 0.01:
        # Find middle point
        c = (a + b) / 2
        # Check if middle point is root
        if equation(c) == 0.0:
            break
        # Decide the side to repeat the steps
        if equation(c) * equation(a) < 0:
            b = c
        else:
            a = c
    return c
if __name__ == "__main__":
import doctest
doctest.testmod()
print(bisection(-2, 5))
print(bisection(0, 6))
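# Note (added): the loop stops once the bracket is narrower than 0.01, so the result
# is only accurate to about two decimals; shrink that tolerance for more precision.
# For 10 - x * x the positive root is sqrt(10) ~= 3.1623, so:
#
#   >>> abs(bisection(0, 6) - 10 ** 0.5) < 0.01
#   True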
| 200 |
def count_divisors(n: int) -> int:
    n_divisors = 1
    i = 2
    while i * i <= n:
        multiplicity = 0
        while n % i == 0:
            n //= i
            multiplicity += 1
        n_divisors *= multiplicity + 1
        i += 1
    if n > 1:
        n_divisors *= 2
    return n_divisors


def solution() -> int:
    """Return the first triangle number with more than 500 divisors."""
    t_num = 1
    i = 1
    while True:
        i += 1
        t_num += i

        if count_divisors(t_num) > 500:
            break

    return t_num
if __name__ == "__main__":
print(solution())
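# Why count_divisors works (added note): if n = p1^e1 * ... * pk^ek, the number of
# divisors is (e1 + 1) * ... * (ek + 1); the trial division above accumulates
# exactly that product.
#
#   >>> count_divisors(28)  # 28 = 2^2 * 7 -> (2 + 1) * (1 + 1)
#   6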
| 200 | 1 |
"""simple docstring"""
import unittest
import numpy as np
from transformers import DistilBertConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax.numpy as jnp
from transformers.models.distilbert.modeling_flax_distilbert import (
FlaxDistilBertForMaskedLM,
FlaxDistilBertForMultipleChoice,
FlaxDistilBertForQuestionAnswering,
FlaxDistilBertForSequenceClassification,
FlaxDistilBertForTokenClassification,
FlaxDistilBertModel,
)
class FlaxDistilBertModelTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_attention_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_choices=4,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_choices = num_choices

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        attention_mask = None
        if self.use_attention_mask:
            attention_mask = random_attention_mask([self.batch_size, self.seq_length])

        config = DistilBertConfig(
            vocab_size=self.vocab_size,
            dim=self.hidden_size,
            n_layers=self.num_hidden_layers,
            n_heads=self.num_attention_heads,
            hidden_dim=self.intermediate_size,
            hidden_act=self.hidden_act,
            dropout=self.hidden_dropout_prob,
            attention_dropout=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            initializer_range=self.initializer_range,
            tie_weights_=True,
        )

        return config, input_ids, attention_mask

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, attention_mask = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": attention_mask}
        return config, inputs_dict
@require_flax
class FlaxDistilBertModelTest(FlaxModelTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            FlaxDistilBertModel,
            FlaxDistilBertForMaskedLM,
            FlaxDistilBertForMultipleChoice,
            FlaxDistilBertForQuestionAnswering,
            FlaxDistilBertForSequenceClassification,
            FlaxDistilBertForTokenClassification,
            FlaxDistilBertForQuestionAnswering,
        )
        if is_flax_available()
        else ()
    )

    def setUp(self):
        self.model_tester = FlaxDistilBertModelTester(self)

    @slow
    def test_model_from_pretrained(self):
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("distilbert-base-uncased")
            outputs = model(np.ones((1, 1)))
            self.assertIsNotNone(outputs)
@require_flax
class FlaxDistilBertModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_no_head(self):
        model = FlaxDistilBertModel.from_pretrained("distilbert-base-uncased")
        input_ids = np.array([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]])
        attention_mask = np.array([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]])

        output = model(input_ids, attention_mask=attention_mask)[0]
        expected_shape = (1, 11, 768)
        self.assertEqual(output.shape, expected_shape)
        expected_slice = np.array([[[-0.1639, 0.3299, 0.1648], [-0.1746, 0.3289, 0.1710], [-0.1884, 0.3357, 0.1810]]])

        self.assertTrue(jnp.allclose(output[:, 1:4, 1:4], expected_slice, atol=1e-4))
| 242 |
"""simple docstring"""
import argparse
import shutil
import time
from json import JSONDecodeError
from logging import getLogger
from pathlib import Path
from typing import Dict, List
import torch
from torch.utils.data import DataLoader
from tqdm import tqdm
from transformers import AutoModelForSeq2SeqLM, AutoTokenizer
from utils import (
    Seq2SeqDataset,
calculate_bleu,
calculate_rouge,
chunks,
lmap,
load_json,
parse_numeric_n_bool_cl_kwargs,
save_json,
use_task_specific_params,
write_txt_file,
)
logger = getLogger(__name__)
def eval_data_dir(
    data_dir,
    save_dir: str,
    model_name: str,
    bs: int = 8,
    max_source_length: int = 1024,
    type_path="val",
    n_obs=None,
    fp16=False,
    task="summarization",
    local_rank=None,
    num_return_sequences=1,
    dataset_kwargs: Dict = None,
    prefix="",
    **generate_kwargs,
) -> Dict:
    """Run evaluation on part of the data for one gpu and save to {save_dir}/rank_{rank}_output.json"""
    model_name = str(model_name)
    assert local_rank is not None
    torch.distributed.init_process_group(backend="nccl", rank=local_rank)

    save_dir = Path(save_dir)
    save_path = save_dir.joinpath(f"rank_{local_rank}_output.json")
    torch.cuda.set_device(local_rank)
    model = AutoModelForSeq2SeqLM.from_pretrained(model_name).cuda()
    if fp16:
        model = model.half()
    # determine if we need to increase num_beams
    use_task_specific_params(model, task)  # update config with task specific params
    num_beams = generate_kwargs.pop("num_beams", model.config.num_beams)  # AttributeError risk?
    if num_return_sequences > num_beams:
        num_beams = num_return_sequences

    tokenizer = AutoTokenizer.from_pretrained(model_name)
    logger.info(f"Inferred tokenizer type: {tokenizer.__class__}")  # if this is wrong, check config.model_type.

    if max_source_length is None:
        max_source_length = tokenizer.model_max_length
    if prefix is None:
        prefix = prefix or getattr(model.config, "prefix", "") or ""
    ds = Seq2SeqDataset(
        tokenizer,
        data_dir,
        max_source_length,
        max_target_length=1024,
        type_path=type_path,
        n_obs=n_obs,
        prefix=prefix,
        **dataset_kwargs,
    )
    # I set shuffle=True for a more accurate progress bar.
    # If all the longest samples are first, the prog bar estimate is too high at the beginning.
    sampler = ds.make_sortish_sampler(bs, distributed=True, add_extra_examples=False, shuffle=True)
    data_loader = DataLoader(ds, sampler=sampler, batch_size=bs, collate_fn=ds.collate_fn)
    results = []
    for batch in tqdm(data_loader):
        summaries = model.generate(
            input_ids=batch["input_ids"].to(model.device),
            attention_mask=batch["attention_mask"].to(model.device),
            num_return_sequences=num_return_sequences,
            num_beams=num_beams,
            **generate_kwargs,
        )
        preds = tokenizer.batch_decode(summaries, skip_special_tokens=True, clean_up_tokenization_spaces=False)
        ids = batch["ids"]
        if num_return_sequences > 1:
            preds = chunks(preds, num_return_sequences)  # batch size chunks, each of size num_return_seq
        for i, pred in enumerate(preds):
            results.append({"pred": pred, "id": ids[i].item()})
    save_json(results, save_path)
    return results, sampler.num_replicas
def run_generate():
    parser = argparse.ArgumentParser(
        epilog="Unspecified args like --num_beams=2 --decoder_start_token_id=4 are passed to model.generate"
    )
    parser.add_argument("--data_dir", type=str, help="like cnn_dm/test.source")
    parser.add_argument(
        "--model_name",
        type=str,
        help="like facebook/bart-large-cnn,t5-base, etc.",
        default="sshleifer/distilbart-xsum-12-3",
    )
    parser.add_argument("--save_dir", type=str, help="where to save", default="tmp_gen")
    parser.add_argument("--max_source_length", type=int, default=None)
    parser.add_argument(
        "--type_path", type=str, default="test", help="which subset to evaluate typically train/val/test"
    )
    parser.add_argument("--task", type=str, default="summarization", help="used for task_specific_params + metrics")
    parser.add_argument("--bs", type=int, default=8, required=False, help="batch size")
    parser.add_argument(
        "--local_rank", type=int, default=-1, required=False, help="should be passed by distributed.launch"
    )
    parser.add_argument(
        "--n_obs", type=int, default=None, required=False, help="How many observations. Defaults to all."
    )
    parser.add_argument(
        "--num_return_sequences", type=int, default=1, required=False, help="How many sequences to return"
    )
    parser.add_argument(
        "--sync_timeout",
        type=int,
        default=600,
        required=False,
        help="How long should master process wait for other processes to finish.",
    )
    parser.add_argument("--src_lang", type=str, default=None, required=False)
    parser.add_argument("--tgt_lang", type=str, default=None, required=False)
    parser.add_argument(
        "--prefix", type=str, required=False, default=None, help="will be added to the begininng of src examples"
    )
    parser.add_argument("--fp16", action="store_true")
    parser.add_argument("--debug", action="store_true")
    start_time = time.time()
    args, rest = parser.parse_known_args()
    generate_kwargs = parse_numeric_n_bool_cl_kwargs(rest)
    if generate_kwargs and args.local_rank <= 0:
        print(f"parsed the following generate kwargs: {generate_kwargs}")
    json_save_dir = Path(args.save_dir + "_tmp")
    Path(json_save_dir).mkdir(exist_ok=True)  # this handles locking.
    intermediate_files = list(json_save_dir.glob("rank_*.json"))
    if intermediate_files:
        raise ValueError(f"Found files at {json_save_dir} please move or remove them.")
    # In theory, a node could finish and save before another node hits this. If this happens, we can address later.
    dataset_kwargs = {}
    if args.src_lang is not None:
        dataset_kwargs["src_lang"] = args.src_lang
    if args.tgt_lang is not None:
        dataset_kwargs["tgt_lang"] = args.tgt_lang

    Path(args.save_dir).mkdir(exist_ok=True)
    results, num_replicas = eval_data_dir(
        args.data_dir,
        json_save_dir,
        args.model_name,
        type_path=args.type_path,
        bs=args.bs,
        fp16=args.fp16,
        task=args.task,
        local_rank=args.local_rank,
        n_obs=args.n_obs,
        max_source_length=args.max_source_length,
        num_return_sequences=args.num_return_sequences,
        prefix=args.prefix,
        dataset_kwargs=dataset_kwargs,
        **generate_kwargs,
    )

    if args.local_rank <= 0:
        save_dir = Path(args.save_dir)
        save_dir.mkdir(exist_ok=True)
        partial_results = gather_results_from_each_node(num_replicas, json_save_dir, args.sync_timeout)
        preds = combine_partial_results(partial_results)
        if args.num_return_sequences > 1:
            save_path = save_dir.joinpath("pseudolabel_results.json")
            print(f"Saving aggregated results at {save_path}, intermediate in {json_save_dir}/")
            save_json(preds, save_path)
            return
        tgt_file = Path(args.data_dir).joinpath(args.type_path + ".target")
        with open(tgt_file) as f:
            labels = [x.rstrip() for x in f.readlines()][: len(preds)]

        # Calculate metrics, save metrics, and save _generations.txt
        calc_bleu = "translation" in args.task
        score_fn = calculate_bleu if calc_bleu else calculate_rouge
        metric_name = "bleu" if calc_bleu else "rouge"
        metrics = score_fn(preds, labels)
        metrics["n_obs"] = len(preds)
        runtime = time.time() - start_time
        metrics["seconds_per_sample"] = round(runtime / metrics["n_obs"], 4)
        metrics["n_gpus"] = num_replicas
        # TODO(@stas00): add whatever metadata to metrics
        metrics_save_path = save_dir.joinpath(f"{args.type_path}_{metric_name}.json")
        save_json(metrics, metrics_save_path, indent=None)
        print(metrics)
        write_txt_file(preds, save_dir.joinpath(f"{args.type_path}_generations.txt"))
        if args.debug:
            write_txt_file(labels, save_dir.joinpath(f"{args.type_path}.target"))
        else:
            shutil.rmtree(json_save_dir)
def combine_partial_results(partial_results) -> List:
    """Concatenate partial results into one file, then sort it by id."""
    records = []
    for partial_result in partial_results:
        records.extend(partial_result)
    records = sorted(records, key=lambda x: x["id"])
    preds = [x["pred"] for x in records]
    return preds
def gather_results_from_each_node(num_replicas, save_dir, timeout) -> List[Dict[str, List]]:
    # WAIT FOR lots of .json files
    start_wait = time.time()
    logger.info("waiting for all nodes to finish")
    json_data = None
    while (time.time() - start_wait) < timeout:
        json_files = list(save_dir.glob("rank_*.json"))
        if len(json_files) < num_replicas:
            continue
        try:
            # make sure all json files are fully saved
            json_data = lmap(load_json, json_files)
            return json_data
        except JSONDecodeError:
            continue
    else:
        raise TimeoutError("Rank 0 gave up on waiting for other processes")
# Unreachable
if __name__ == "__main__":
# Usage for MT:
run_generate()
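# Example launch (added; paths, model and GPU count are placeholders):
#
#   python -m torch.distributed.launch --nproc_per_node=8 run_distributed_eval.py \
#       --model_name sshleifer/distilbart-xsum-12-3 \
#       --data_dir xsum --save_dir dbart_xsum_preds --bs 16 --fp16
#
# Each rank writes rank_{i}_output.json to {save_dir}_tmp; rank 0 gathers them,
# scores ROUGE/BLEU and writes {type_path}_generations.txt under --save_dir.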
| 242 | 1 |
import inspect
import unittest
import numpy as np
from tests.test_modeling_common import floats_tensor
from transformers import Mask2FormerConfig, is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
    from transformers import Mask2FormerForUniversalSegmentation, Mask2FormerModel
if is_vision_available():
    from transformers import Mask2FormerImageProcessor
if is_vision_available():
from PIL import Image
class _a :
def __init__(self, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_=2, SCREAMING_SNAKE_CASE_=True, SCREAMING_SNAKE_CASE_=False, SCREAMING_SNAKE_CASE_=10, SCREAMING_SNAKE_CASE_=3, SCREAMING_SNAKE_CASE_=32 * 8, SCREAMING_SNAKE_CASE_=32 * 8, SCREAMING_SNAKE_CASE_=4, SCREAMING_SNAKE_CASE_=64, ) -> Union[str, Any]:
UpperCAmelCase_: int = parent
UpperCAmelCase_: Tuple = batch_size
UpperCAmelCase_: int = is_training
UpperCAmelCase_: Any = use_auxiliary_loss
UpperCAmelCase_: str = num_queries
UpperCAmelCase_: List[Any] = num_channels
UpperCAmelCase_: Union[str, Any] = min_size
UpperCAmelCase_: Optional[Any] = max_size
UpperCAmelCase_: Tuple = num_labels
UpperCAmelCase_: Union[str, Any] = hidden_dim
UpperCAmelCase_: int = hidden_dim
def __snake_case (self ) -> Tuple:
UpperCAmelCase_: List[Any] = floats_tensor([self.batch_size, self.num_channels, self.min_size, self.max_size] ).to(
SCREAMING_SNAKE_CASE_ )
UpperCAmelCase_: List[str] = torch.ones([self.batch_size, self.min_size, self.max_size], device=SCREAMING_SNAKE_CASE_ )
UpperCAmelCase_: Tuple = (
torch.rand([self.batch_size, self.num_labels, self.min_size, self.max_size], device=SCREAMING_SNAKE_CASE_ ) > 0.5
).float()
UpperCAmelCase_: Optional[int] = (torch.rand((self.batch_size, self.num_labels), device=SCREAMING_SNAKE_CASE_ ) > 0.5).long()
UpperCAmelCase_: Union[str, Any] = self.get_config()
return config, pixel_values, pixel_mask, mask_labels, class_labels
    def get_config(self):
        config = Mask2FormerConfig(
            hidden_size=self.hidden_dim,
        )
        config.num_queries = self.num_queries
        config.num_labels = self.num_labels
        config.backbone_config.depths = [1, 1, 1, 1]
        config.backbone_config.num_channels = self.num_channels
        config.encoder_feedforward_dim = 64
        config.dim_feedforward = 128
        config.hidden_dim = self.hidden_dim
        config.mask_feature_size = self.hidden_dim
        config.feature_size = self.hidden_dim
        return config
    def prepare_config_and_inputs_for_common(self):
        config, pixel_values, pixel_mask, mask_labels, class_labels = self.prepare_config_and_inputs()
        inputs_dict = {"pixel_values": pixel_values, "pixel_mask": pixel_mask}
        return config, inputs_dict

    def check_output_hidden_state(self, output, config):
        encoder_hidden_states = output.encoder_hidden_states
        pixel_decoder_hidden_states = output.pixel_decoder_hidden_states
        transformer_decoder_hidden_states = output.transformer_decoder_hidden_states

        self.parent.assertTrue(len(encoder_hidden_states), len(config.backbone_config.depths))
        self.parent.assertTrue(len(pixel_decoder_hidden_states), len(config.backbone_config.depths))
        self.parent.assertTrue(len(transformer_decoder_hidden_states), config.decoder_layers)
    def create_and_check_mask2former_model(self, config, pixel_values, pixel_mask, output_hidden_states=False):
        with torch.no_grad():
            model = Mask2FormerModel(config=config)
            model.to(torch_device)
            model.eval()

            output = model(pixel_values=pixel_values, pixel_mask=pixel_mask)
            output = model(pixel_values, output_hidden_states=True)

        self.parent.assertEqual(
            output.transformer_decoder_last_hidden_state.shape, (self.batch_size, self.num_queries, self.hidden_dim)
        )
        # let's ensure the other two hidden state exists
        self.parent.assertTrue(output.pixel_decoder_last_hidden_state is not None)
        self.parent.assertTrue(output.encoder_last_hidden_state is not None)

        if output_hidden_states:
            self.check_output_hidden_state(output, config)
    def create_and_check_mask2former_instance_segmentation_head_model(
        self, config, pixel_values, pixel_mask, mask_labels, class_labels
    ):
        model = Mask2FormerForUniversalSegmentation(config=config)
        model.to(torch_device)
        model.eval()

        def comm_check_on_output(result):
            # let's still check that all the required stuff is there
            self.parent.assertTrue(result.transformer_decoder_last_hidden_state is not None)
            self.parent.assertTrue(result.pixel_decoder_last_hidden_state is not None)
            self.parent.assertTrue(result.encoder_last_hidden_state is not None)
            # okay, now we need to check the logits shape
            # due to the encoder compression, masks have a //4 spatial size
            self.parent.assertEqual(
                result.masks_queries_logits.shape,
                (self.batch_size, self.num_queries, self.min_size // 4, self.max_size // 4),
            )
            # + 1 for null class
            self.parent.assertEqual(
                result.class_queries_logits.shape, (self.batch_size, self.num_queries, self.num_labels + 1)
            )

        with torch.no_grad():
            result = model(pixel_values=pixel_values, pixel_mask=pixel_mask)
            result = model(pixel_values)

            comm_check_on_output(result)

            result = model(
                pixel_values=pixel_values, pixel_mask=pixel_mask, mask_labels=mask_labels, class_labels=class_labels
            )

            comm_check_on_output(result)

            self.parent.assertTrue(result.loss is not None)
            self.parent.assertEqual(result.loss.shape, torch.Size([1]))
@require_torch
class Mask2FormerModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (Mask2FormerModel, Mask2FormerForUniversalSegmentation) if is_torch_available() else ()
    pipeline_model_mapping = {"feature-extraction": Mask2FormerModel} if is_torch_available() else {}

    is_encoder_decoder = False
    test_pruning = False
    test_head_masking = False
    test_missing_keys = False
    def setUp(self):
        self.model_tester = Mask2FormerModelTester(self)
        self.config_tester = ConfigTester(self, config_class=Mask2FormerConfig, has_text_modality=False)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_mask2former_model(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.create_and_check_mask2former_model(config, **inputs_dict, output_hidden_states=False)

    def test_mask2former_instance_segmentation_head_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mask2former_instance_segmentation_head_model(*config_and_inputs)
    @unittest.skip(reason="Mask2Former does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="Mask2Former does not have a get_input_embeddings method")
    def test_model_common_attributes(self):
        pass

    @unittest.skip(reason="Mask2Former is not a generative model")
    def test_generate_without_input_ids(self):
        pass

    @unittest.skip(reason="Mask2Former does not use token embeddings")
    def test_resize_tokens_embeddings(self):
        pass

    @require_torch_multi_gpu
    @unittest.skip(
        reason="Mask2Former has some layers using `add_module` which doesn't work well with `nn.DataParallel`"
    )
    def test_multi_gpu_data_parallel_forward(self):
        pass

    @unittest.skip("Will be fixed soon by reducing the size of the model used for common tests.")
    def test_model_is_small(self):
        pass
    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)
@slow
    def test_model_from_pretrained(self):
        for model_name in ["facebook/mask2former-swin-small-coco-instance"]:
            model = Mask2FormerModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
    def test_model_with_labels(self):
        size = (self.model_tester.min_size,) * 2
        inputs = {
            "pixel_values": torch.randn((2, 3, *size), device=torch_device),
            "mask_labels": torch.randn((2, 10, *size), device=torch_device),
            "class_labels": torch.zeros(2, 10, device=torch_device).long(),
        }
        config = self.model_tester.get_config()

        model = Mask2FormerForUniversalSegmentation(config).to(torch_device)
        outputs = model(**inputs)
        self.assertTrue(outputs.loss is not None)

    def test_hidden_states_output(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.create_and_check_mask2former_model(config, **inputs_dict, output_hidden_states=True)
    def test_attention_outputs(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config).to(torch_device)
            outputs = model(**inputs_dict, output_attentions=True)
            self.assertTrue(outputs.attentions is not None)
    def test_training(self):
        if not self.model_tester.is_training:
            return

        model_class = self.all_model_classes[1]
        config, pixel_values, pixel_mask, mask_labels, class_labels = self.model_tester.prepare_config_and_inputs()

        model = model_class(config)
        model.to(torch_device)
        model.train()

        loss = model(pixel_values, mask_labels=mask_labels, class_labels=class_labels).loss
        loss.backward()
    def test_retain_grad_hidden_states_attentions(self):
        model_class = self.all_model_classes[1]
        config, pixel_values, pixel_mask, mask_labels, class_labels = self.model_tester.prepare_config_and_inputs()
        config.output_hidden_states = True
        config.output_attentions = True

        model = model_class(config).to(torch_device)
        model.train()

        outputs = model(pixel_values, mask_labels=mask_labels, class_labels=class_labels)

        encoder_hidden_states = outputs.encoder_hidden_states[0]
        encoder_hidden_states.retain_grad()

        pixel_decoder_hidden_states = outputs.pixel_decoder_hidden_states[0]
        pixel_decoder_hidden_states.retain_grad()

        transformer_decoder_hidden_states = outputs.transformer_decoder_hidden_states[0]
        transformer_decoder_hidden_states.retain_grad()

        attentions = outputs.attentions[0]
        attentions.retain_grad()

        outputs.loss.backward(retain_graph=True)

        self.assertIsNotNone(encoder_hidden_states.grad)
        self.assertIsNotNone(pixel_decoder_hidden_states.grad)
        self.assertIsNotNone(transformer_decoder_hidden_states.grad)
        self.assertIsNotNone(attentions.grad)
TOLERANCE = 1e-4
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_vision
@slow
class Mask2FormerModelIntegrationTest(unittest.TestCase):
    @cached_property
    def model_checkpoints(self):
        return "facebook/mask2former-swin-small-coco-instance"

    @cached_property
    def default_image_processor(self):
        return Mask2FormerImageProcessor.from_pretrained(self.model_checkpoints) if is_vision_available() else None
    def test_inference_no_head(self):
        model = Mask2FormerModel.from_pretrained(self.model_checkpoints).to(torch_device)
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(image, return_tensors="pt").to(torch_device)
        inputs_shape = inputs["pixel_values"].shape
        # check size is divisible by 32
        self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0)
        # check size
        self.assertEqual(inputs_shape, (1, 3, 384, 384))

        with torch.no_grad():
            outputs = model(**inputs)

        expected_slice_hidden_state = torch.tensor(
            [[-0.2790, -1.0717, -1.1668], [-0.5128, -0.3128, -0.4987], [-0.5832, 0.1971, -0.0197]]
        ).to(torch_device)
        self.assertTrue(
            torch.allclose(
                outputs.encoder_last_hidden_state[0, 0, :3, :3], expected_slice_hidden_state, atol=TOLERANCE
            )
        )

        expected_slice_hidden_state = torch.tensor(
            [[0.8973, 1.1847, 1.1776], [1.1934, 1.5040, 1.5128], [1.1153, 1.4486, 1.4951]]
        ).to(torch_device)
        self.assertTrue(
            torch.allclose(
                outputs.pixel_decoder_last_hidden_state[0, 0, :3, :3], expected_slice_hidden_state, atol=TOLERANCE
            )
        )

        expected_slice_hidden_state = torch.tensor(
            [[2.1152, 1.7000, -0.8603], [1.5808, 1.8004, -0.9353], [1.6043, 1.7495, -0.5999]]
        ).to(torch_device)
        self.assertTrue(
            torch.allclose(
                outputs.transformer_decoder_last_hidden_state[0, :3, :3], expected_slice_hidden_state, atol=TOLERANCE
            )
        )
    def test_inference_universal_segmentation_head(self):
        model = Mask2FormerForUniversalSegmentation.from_pretrained(self.model_checkpoints).to(torch_device).eval()
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(image, return_tensors="pt").to(torch_device)
        inputs_shape = inputs["pixel_values"].shape
        # check size is divisible by 32
        self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0)
        # check size
        self.assertEqual(inputs_shape, (1, 3, 384, 384))

        with torch.no_grad():
            outputs = model(**inputs)

        # masks_queries_logits
        masks_queries_logits = outputs.masks_queries_logits
        self.assertEqual(
            masks_queries_logits.shape, (1, model.config.num_queries, inputs_shape[-2] // 4, inputs_shape[-1] // 4)
        )
        expected_slice = [
            [-8.7839, -9.0056, -8.8121],
            [-7.4104, -7.0313, -6.5401],
            [-6.6105, -6.3427, -6.4675],
        ]
        expected_slice = torch.tensor(expected_slice).to(torch_device)
        self.assertTrue(torch.allclose(masks_queries_logits[0, 0, :3, :3], expected_slice, atol=TOLERANCE))

        # class_queries_logits
        class_queries_logits = outputs.class_queries_logits
        self.assertEqual(class_queries_logits.shape, (1, model.config.num_queries, model.config.num_labels + 1))
        expected_slice = torch.tensor(
            [
                [1.8324, -8.0835, -4.1922],
                [0.8450, -9.0050, -3.6053],
                [0.3045, -7.7293, -3.0275],
            ]
        ).to(torch_device)
        self.assertTrue(torch.allclose(outputs.class_queries_logits[0, :3, :3], expected_slice, atol=TOLERANCE))
    def test_with_segmentation_maps_and_loss(self):
        model = Mask2FormerForUniversalSegmentation.from_pretrained(self.model_checkpoints).to(torch_device).eval()
        image_processor = self.default_image_processor

        inputs = image_processor(
            [np.zeros((3, 800, 1333)), np.zeros((3, 800, 1333))],
            segmentation_maps=[np.zeros((384, 384)).astype(np.float32), np.zeros((384, 384)).astype(np.float32)],
            return_tensors="pt",
        )

        inputs["pixel_values"] = inputs["pixel_values"].to(torch_device)
        inputs["mask_labels"] = [el.to(torch_device) for el in inputs["mask_labels"]]
        inputs["class_labels"] = [el.to(torch_device) for el in inputs["class_labels"]]

        with torch.no_grad():
            outputs = model(**inputs)

        self.assertTrue(outputs.loss is not None)
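# NOTE (added usage sketch, hedged): once `outputs` are produced as above, the
# image processor can turn raw query logits into segmentation maps, e.g.:
#
#   semantic_map = image_processor.post_process_semantic_segmentation(
#       outputs, target_sizes=[image.size[::-1]]
#   )[0]
#
# The exact call shown here is illustrative and not taken from this test file.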
| 82 |
import math
from typing import Optional
import numpy as np
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

ENCODEC_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/encodec_24khz": "https://huggingface.co/facebook/encodec_24khz/resolve/main/config.json",
    "facebook/encodec_48khz": "https://huggingface.co/facebook/encodec_48khz/resolve/main/config.json",
}


class EncodecConfig(PretrainedConfig):
    model_type = "encodec"
    def __init__(
        self,
        target_bandwidths=[1.5, 3.0, 6.0, 12.0, 24.0],
        sampling_rate=24_000,
        audio_channels=1,
        normalize=False,
        chunk_length_s=None,
        overlap=None,
        hidden_size=128,
        num_filters=32,
        num_residual_layers=1,
        upsampling_ratios=[8, 5, 4, 2],
        norm_type="weight_norm",
        kernel_size=7,
        last_kernel_size=7,
        residual_kernel_size=3,
        dilation_growth_rate=2,
        use_causal_conv=True,
        pad_mode="reflect",
        compress=2,
        num_lstm_layers=2,
        trim_right_ratio=1.0,
        codebook_size=1024,
        codebook_dim=None,
        use_conv_shortcut=True,
        **kwargs,
    ):
        self.target_bandwidths = target_bandwidths
        self.sampling_rate = sampling_rate
        self.audio_channels = audio_channels
        self.normalize = normalize
        self.chunk_length_s = chunk_length_s
        self.overlap = overlap
        self.hidden_size = hidden_size
        self.num_filters = num_filters
        self.num_residual_layers = num_residual_layers
        self.upsampling_ratios = upsampling_ratios
        self.norm_type = norm_type
        self.kernel_size = kernel_size
        self.last_kernel_size = last_kernel_size
        self.residual_kernel_size = residual_kernel_size
        self.dilation_growth_rate = dilation_growth_rate
        self.use_causal_conv = use_causal_conv
        self.pad_mode = pad_mode
        self.compress = compress
        self.num_lstm_layers = num_lstm_layers
        self.trim_right_ratio = trim_right_ratio
        self.codebook_size = codebook_size
        self.codebook_dim = codebook_dim if codebook_dim is not None else hidden_size
        self.use_conv_shortcut = use_conv_shortcut

        if self.norm_type not in ["weight_norm", "time_group_norm"]:
            raise ValueError(
                f'self.norm_type must be one of `"weight_norm"`, `"time_group_norm"`), got {self.norm_type}'
            )

        super().__init__(**kwargs)
    @property
    def chunk_length(self) -> Optional[int]:
        if self.chunk_length_s is None:
            return None
        else:
            return int(self.chunk_length_s * self.sampling_rate)

    @property
    def chunk_stride(self) -> Optional[int]:
        if self.chunk_length_s is None or self.overlap is None:
            return None
        else:
            return max(1, int((1.0 - self.overlap) * self.chunk_length))

    @property
    def frame_rate(self) -> int:
        hop_length = np.prod(self.upsampling_ratios)
        return math.ceil(self.sampling_rate / hop_length)

    @property
    def num_quantizers(self) -> int:
        return int(1000 * self.target_bandwidths[-1] // (self.frame_rate * 10))
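# NOTE (added usage sketch, hedged): how the derived properties combine for a
# 24 kHz model; numbers are arithmetic on the defaults, not measured output.
#
#   config = EncodecConfig(chunk_length_s=1.0, overlap=0.01)
#   config.chunk_length   # int(1.0 * 24000) = 24000 samples per chunk
#   config.chunk_stride   # max(1, int((1.0 - 0.01) * 24000)) = 23760 samples
#   config.frame_rate     # ceil(24000 / (8 * 5 * 4 * 2)) = 75 frames per second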
| 82 | 1 |
import numpy
class TwoHiddenLayerNeuralNetwork:
    def __init__(self, input_array, output_array):
        # Input values provided for training the model.
        self.input_array = input_array

        # Random initial weights are assigned where first argument is the
        # number of nodes in previous layer and second argument is the
        # number of nodes in the next layer.

        # self.input_array.shape[1] is used to represent number of nodes in input layer.
        # First hidden layer consists of 4 nodes.
        self.input_layer_and_first_hidden_layer_weights = numpy.random.rand(
            self.input_array.shape[1], 4
        )

        # Random initial values for the first hidden layer.
        # First hidden layer has 4 nodes.
        # Second hidden layer has 3 nodes.
        self.first_hidden_layer_and_second_hidden_layer_weights = numpy.random.rand(4, 3)

        # Random initial values for the second hidden layer.
        # Second hidden layer has 3 nodes.
        # Output layer has 1 node.
        self.second_hidden_layer_and_output_layer_weights = numpy.random.rand(3, 1)

        # Real output values provided.
        self.output_array = output_array

        # Predicted output values by the neural network.
        # Predicted_output array initially consists of zeroes.
        self.predicted_output = numpy.zeros(output_array.shape)
    def feedforward(self):
        """
        The information moves in only one direction, i.e. forward from the
        input nodes, through the two hidden layers and to the output nodes.
        """
        self.layer_between_input_and_first_hidden_layer = sigmoid(
            numpy.dot(self.input_array, self.input_layer_and_first_hidden_layer_weights)
        )

        # layer_between_first_hidden_layer_and_second_hidden_layer is the layer
        # connecting the first hidden set of nodes with the second hidden set of nodes.
        self.layer_between_first_hidden_layer_and_second_hidden_layer = sigmoid(
            numpy.dot(
                self.layer_between_input_and_first_hidden_layer,
                self.first_hidden_layer_and_second_hidden_layer_weights,
            )
        )

        # layer_between_second_hidden_layer_and_output is the layer connecting
        # second hidden layer with the output node.
        self.layer_between_second_hidden_layer_and_output = sigmoid(
            numpy.dot(
                self.layer_between_first_hidden_layer_and_second_hidden_layer,
                self.second_hidden_layer_and_output_layer_weights,
            )
        )

        return self.layer_between_second_hidden_layer_and_output
    def back_propagation(self):
        """
        Fine-tunes the weights of the network based on the error rate obtained
        in the previous epoch, using the derivative of the sigmoid activation.
        """
        updated_second_hidden_layer_and_output_layer_weights = numpy.dot(
            self.layer_between_first_hidden_layer_and_second_hidden_layer.T,
            2
            * (self.output_array - self.predicted_output)
            * sigmoid_derivative(self.predicted_output),
        )
        updated_first_hidden_layer_and_second_hidden_layer_weights = numpy.dot(
            self.layer_between_input_and_first_hidden_layer.T,
            numpy.dot(
                2
                * (self.output_array - self.predicted_output)
                * sigmoid_derivative(self.predicted_output),
                self.second_hidden_layer_and_output_layer_weights.T,
            )
            * sigmoid_derivative(self.layer_between_first_hidden_layer_and_second_hidden_layer),
        )
        updated_input_layer_and_first_hidden_layer_weights = numpy.dot(
            self.input_array.T,
            numpy.dot(
                numpy.dot(
                    2
                    * (self.output_array - self.predicted_output)
                    * sigmoid_derivative(self.predicted_output),
                    self.second_hidden_layer_and_output_layer_weights.T,
                )
                * sigmoid_derivative(self.layer_between_first_hidden_layer_and_second_hidden_layer),
                self.first_hidden_layer_and_second_hidden_layer_weights.T,
            )
            * sigmoid_derivative(self.layer_between_input_and_first_hidden_layer),
        )

        self.input_layer_and_first_hidden_layer_weights += (
            updated_input_layer_and_first_hidden_layer_weights
        )
        self.first_hidden_layer_and_second_hidden_layer_weights += (
            updated_first_hidden_layer_and_second_hidden_layer_weights
        )
        self.second_hidden_layer_and_output_layer_weights += (
            updated_second_hidden_layer_and_output_layer_weights
        )
    def train(self, output, iterations, give_loss):
        """
        Performs the feedforward and back propagation process for the given
        number of iterations.
        """
        for iteration in range(1, iterations + 1):
            self.predicted_output = self.feedforward()
            self.back_propagation()
            if give_loss:
                loss = numpy.mean(numpy.square(output - self.feedforward()))
                print(f"Iteration {iteration} Loss: {loss}")
    def predict(self, input_arr):
        """
        Predicts the output for the given input values.
        """
        # Input values for which the predictions are to be made.
        self.array = input_arr

        self.layer_between_input_and_first_hidden_layer = sigmoid(
            numpy.dot(self.array, self.input_layer_and_first_hidden_layer_weights)
        )

        self.layer_between_first_hidden_layer_and_second_hidden_layer = sigmoid(
            numpy.dot(
                self.layer_between_input_and_first_hidden_layer,
                self.first_hidden_layer_and_second_hidden_layer_weights,
            )
        )

        self.layer_between_second_hidden_layer_and_output = sigmoid(
            numpy.dot(
                self.layer_between_first_hidden_layer_and_second_hidden_layer,
                self.second_hidden_layer_and_output_layer_weights,
            )
        )

        return int(self.layer_between_second_hidden_layer_and_output > 0.6)
def sigmoid(value):
    """Applies the sigmoid activation function."""
    return 1 / (1 + numpy.exp(-value))


def sigmoid_derivative(value):
    """Provides the derivative value of the sigmoid function."""
    return (value) * (1 - (value))
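# NOTE (added clarification): sigmoid_derivative expects the already-activated
# value s = sigmoid(x), not the raw input x. Since ds/dx = s * (1 - s), passing
# the stored layer activations into it during back propagation is correct.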
def example():
    """
    Example showing how to use the neural network class and its methods.
    """
    # Input values.
    test_input = numpy.array(
        (
            [0, 0, 0],
            [0, 0, 1],
            [0, 1, 0],
            [0, 1, 1],
            [1, 0, 0],
            [1, 0, 1],
            [1, 1, 0],
            [1, 1, 1],
        ),
        dtype=numpy.float64,
    )

    # True output values for the given input values.
    output = numpy.array(([0], [1], [1], [0], [1], [0], [0], [1]), dtype=numpy.float64)

    # Calling neural network class.
    neural_network = TwoHiddenLayerNeuralNetwork(input_array=test_input, output_array=output)

    # Calling training function.
    # Set give_loss to True if you want to see loss in every iteration.
    neural_network.train(output=output, iterations=10, give_loss=False)

    return neural_network.predict(numpy.array(([1, 1, 1]), dtype=numpy.float64))
if __name__ == "__main__":
example()
| 222 |
def exchange_sort(numbers: list[int]) -> list[int]:
    """Sorts a list of numbers in place using exchange sort."""
    numbers_length = len(numbers)
    for i in range(numbers_length):
        for j in range(i + 1, numbers_length):
            if numbers[j] < numbers[i]:
                numbers[i], numbers[j] = numbers[j], numbers[i]
    return numbers


if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
    print(exchange_sort(unsorted))
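# NOTE (added): exchange sort performs O(n^2) comparisons in every case — pass i
# fixes position i by comparing it against every later element. For the fully
# reversed input [5, 4, 3, 2, 1], all 10 comparisons trigger an exchange.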
| 187 | 0 |
"""simple docstring"""
import copy
import inspect
import unittest
from transformers import AutoBackbone
from transformers.configuration_utils import PretrainedConfig
from transformers.testing_utils import require_timm, require_torch, torch_device
from transformers.utils.import_utils import is_torch_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor
if is_torch_available():
import torch
from transformers import TimmBackbone, TimmBackboneConfig
from ...test_pipeline_mixin import PipelineTesterMixin
class TimmBackboneModelTester:
    def __init__(
        self,
        parent,
        out_indices=None,
        stage_names=None,
        out_features=None,
        backbone="resnet50",
        batch_size=3,
        image_size=32,
        num_channels=3,
        use_pretrained_backbone=True,
        is_training=True,
    ):
        self.parent = parent
        self.out_indices = out_indices if out_indices is not None else [4]
        self.stage_names = stage_names
        self.out_features = out_features
        self.backbone = backbone
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.use_pretrained_backbone = use_pretrained_backbone
        self.is_training = is_training

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        config = self.get_config()
        return config, pixel_values

    def get_config(self):
        return TimmBackboneConfig(
            image_size=self.image_size,
            num_channels=self.num_channels,
            out_features=self.out_features,
            out_indices=self.out_indices,
            stage_names=self.stage_names,
            use_pretrained_backbone=self.use_pretrained_backbone,
            backbone=self.backbone,
        )

    def create_and_check_model(self, config, pixel_values):
        model = TimmBackbone(config=config)
        model.to(torch_device)
        model.eval()
        with torch.no_grad():
            result = model(pixel_values)
        self.parent.assertEqual(
            result.feature_map[-1].shape,
            (self.batch_size, model.channels[-1], 14, 14),
        )

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
@require_timm
class TimmBackboneModelTest(ModelTesterMixin, BackboneTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (TimmBackbone,) if is_torch_available() else ()
    pipeline_model_mapping = {"feature-extraction": TimmBackbone} if is_torch_available() else {}
    test_resize_embeddings = False
    test_head_masking = False
    test_pruning = False
    has_attentions = False
    def setUp(self):
        self.model_tester = TimmBackboneModelTester(self)
        self.config_tester = ConfigTester(self, config_class=TimmBackboneConfig, has_text_modality=False)

    def test_config(self):
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()
    def test_timm_transformer_backbone_equivalence(self):
        timm_checkpoint = "resnet18"
        transformers_checkpoint = "microsoft/resnet-18"

        timm_model = AutoBackbone.from_pretrained(timm_checkpoint, use_timm_backbone=True)
        transformers_model = AutoBackbone.from_pretrained(transformers_checkpoint)

        self.assertEqual(len(timm_model.out_features), len(transformers_model.out_features))
        self.assertEqual(len(timm_model.stage_names), len(transformers_model.stage_names))
        self.assertEqual(timm_model.channels, transformers_model.channels)
        # Out indices are set to the last layer by default. For timm models, we don't know
        # the number of layers in advance, so we set it to (-1,), whereas for transformers
        # models, we set it to [len(stage_names) - 1] (kept for backward compatibility).
        self.assertEqual(timm_model.out_indices, (-1,))
        self.assertEqual(transformers_model.out_indices, [len(timm_model.stage_names) - 1])

        timm_model = AutoBackbone.from_pretrained(timm_checkpoint, use_timm_backbone=True, out_indices=[1, 2, 3])
        transformers_model = AutoBackbone.from_pretrained(transformers_checkpoint, out_indices=[1, 2, 3])

        self.assertEqual(timm_model.out_indices, transformers_model.out_indices)
        self.assertEqual(len(timm_model.out_features), len(transformers_model.out_features))
        self.assertEqual(timm_model.channels, transformers_model.channels)
    @unittest.skip("TimmBackbone doesn't support feed forward chunking")
    def test_feed_forward_chunking(self):
        pass

    @unittest.skip("TimmBackbone doesn't have num_hidden_layers attribute")
    def test_hidden_states_output(self):
        pass

    @unittest.skip("TimmBackbone initialization is managed on the timm side")
    def test_initialization(self):
        pass

    @unittest.skip("TimmBackbone models doesn't have inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip("TimmBackbone models doesn't have inputs_embeds")
    def test_model_common_attributes(self):
        pass

    @unittest.skip("TimmBackbone model cannot be created without specifying a backbone checkpoint")
    def test_from_pretrained_no_checkpoint(self):
        pass

    @unittest.skip("Only checkpoints on timm can be loaded into TimmBackbone")
    def test_save_load(self):
        pass

    @unittest.skip("model weights aren't tied in TimmBackbone.")
    def test_tie_model_weights(self):
        pass

    @unittest.skip("model weights aren't tied in TimmBackbone.")
    def test_tied_model_weights_key_ignore(self):
        pass

    @unittest.skip("Only checkpoints on timm can be loaded into TimmBackbone")
    def test_load_save_without_tied_weights(self):
        pass

    @unittest.skip("Only checkpoints on timm can be loaded into TimmBackbone")
    def test_model_weights_reload_when_same_dtype(self):
        pass

    @unittest.skip("TimmBackbone doesn't have hidden size info in its configuration.")
    def test_channels(self):
        pass

    @unittest.skip("TimmBackbone doesn't support output_attentions.")
    def test_torchscript_output_attentions(self):
        pass

    @unittest.skip("Safetensors is not supported by timm.")
    def test_can_use_safetensors(self):
        pass

    @unittest.skip("Will be fixed soon by reducing the size of the model used for common tests.")
    def test_model_is_small(self):
        pass
    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)
    def test_retain_grad_hidden_states_attentions(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.output_hidden_states = True
        config.output_attentions = self.has_attentions

        # no need to test all models as different heads yield the same functionality
        model_class = self.all_model_classes[0]
        model = model_class(config)
        model.to(torch_device)

        inputs = self._prepare_for_class(inputs_dict, model_class)
        outputs = model(**inputs)
        output = outputs[0][-1]

        # Encoder-/Decoder-only models
        hidden_states = outputs.hidden_states[0]
        hidden_states.retain_grad()

        if self.has_attentions:
            attentions = outputs.attentions[0]
            attentions.retain_grad()

        output.flatten()[0].backward(retain_graph=True)

        self.assertIsNotNone(hidden_states.grad)

        if self.has_attentions:
            self.assertIsNotNone(attentions.grad)
    def test_create_from_modified_config(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            result = model(**inputs_dict)

            self.assertEqual(len(result.feature_maps), len(config.out_indices))
            self.assertEqual(len(model.channels), len(config.out_indices))

            # Check output of last stage is taken if out_features=None, out_indices=None
            modified_config = copy.deepcopy(config)
            modified_config.out_indices = None
            model = model_class(modified_config)
            model.to(torch_device)
            model.eval()
            result = model(**inputs_dict)

            self.assertEqual(len(result.feature_maps), 1)
            self.assertEqual(len(model.channels), 1)

            # Check backbone can be initialized with fresh weights
            modified_config = copy.deepcopy(config)
            modified_config.use_pretrained_backbone = False
            model = model_class(modified_config)
            model.to(torch_device)
            model.eval()
            result = model(**inputs_dict)
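# NOTE (added usage sketch, hedged): outside of tests, the same timm bridge is
# exercised by loading any timm checkpoint through AutoBackbone, e.g.:
#
#   from transformers import AutoBackbone
#
#   backbone = AutoBackbone.from_pretrained("resnet50", use_timm_backbone=True, out_indices=(1, 2, 3))
#   feature_maps = backbone(pixel_values).feature_maps  # one tensor per requested stage
#
# The checkpoint name and out_indices above are illustrative.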
| 370 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
_import_structure = {"configuration_encoder_decoder": ["EncoderDecoderConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_encoder_decoder"] = ["EncoderDecoderModel"]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A_ : List[Any] = ["TFEncoderDecoderModel"]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A_ : List[Any] = ["FlaxEncoderDecoderModel"]
if TYPE_CHECKING:
from .configuration_encoder_decoder import EncoderDecoderConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_encoder_decoder import EncoderDecoderModel
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_encoder_decoder import TFEncoderDecoderModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_encoder_decoder import FlaxEncoderDecoderModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
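# NOTE (added): the `sys.modules[__name__] = _LazyModule(...)` line swaps this
# module object for a lazy proxy, so submodules are only imported on first
# attribute access. A hedged illustration of the effect:
#
#   import transformers.models.encoder_decoder as ed  # cheap: nothing heavy imported yet
#   ed.EncoderDecoderModel                            # now modeling_encoder_decoder is imported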
| 316 | 0 |
'''simple docstring'''
import torch
from transformers import AutoModel
class FSNERModel(torch.nn.Module):
    """
    Few-shot named entity recognition model: scores each query token as the
    start or end of an entity span against support examples.
    """

    def __init__(self, pretrained_model_name_or_path="sayef/fsner-bert-base-uncased"):
        super(FSNERModel, self).__init__()

        self.bert = AutoModel.from_pretrained(pretrained_model_name_or_path, return_dict=True)
        self.cos = torch.nn.CosineSimilarity(3, 1e-08)
        self.softmax = torch.nn.Softmax(dim=1)

    def BERT(self, **inputs):
        return self.bert(**inputs).last_hidden_state

    def VectorSum(self, token_embeddings):
        return token_embeddings.sum(2, keepdim=True)

    def Atten(self, q_rep, S_rep, T=1):
        return self.softmax(T * self.cos(q_rep, S_rep))

    def forward(self, W_query, W_supports):
        """
        Find scores of each token being the start or end token for an entity.
        """
        support_sizes = W_supports["sizes"].tolist()
        start_token_id = W_supports["start_token_id"].item()
        end_token_id = W_supports["end_token_id"].item()

        del W_supports["sizes"]
        del W_supports["start_token_id"]
        del W_supports["end_token_id"]

        q = self.BERT(**W_query)
        S = self.BERT(**W_supports)

        p_starts = None
        p_ends = None

        start_token_masks = W_supports["input_ids"] == start_token_id
        end_token_masks = W_supports["input_ids"] == end_token_id

        for i, size in enumerate(support_sizes):
            if i == 0:
                s = 0
            else:
                s = support_sizes[i - 1]

            s_start = S[s : s + size][start_token_masks[s : s + size]]
            s_end = S[s : s + size][end_token_masks[s : s + size]]

            p_start = torch.matmul(q[i], s_start.T).sum(1).softmax(0)
            p_end = torch.matmul(q[i], s_end.T).sum(1).softmax(0)

            if p_starts is not None:
                p_starts = torch.vstack((p_starts, p_start))
                p_ends = torch.vstack((p_ends, p_end))
            else:
                p_starts = p_start
                p_ends = p_end

        return p_starts, p_ends
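# NOTE (added usage sketch, hedged): the companion tokenizer utilities from the
# same fsner project prepare `W_query` / `W_supports`; the names below follow
# that project's README and are not defined in this file:
#
#   model = FSNERModel("sayef/fsner-bert-base-uncased")
#   p_starts, p_ends = model(W_query, W_supports)
#   # p_starts[i][j]: score of query-i token-j being the start of an entity span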
| 53 |
"""simple docstring"""
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

SPIECE_UNDERLINE = "▁"

VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.bpe.model"}
PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file': {
'xlm-roberta-base': 'https://huggingface.co/xlm-roberta-base/resolve/main/sentencepiece.bpe.model',
'xlm-roberta-large': 'https://huggingface.co/xlm-roberta-large/resolve/main/sentencepiece.bpe.model',
'xlm-roberta-large-finetuned-conll02-dutch': (
'https://huggingface.co/xlm-roberta-large-finetuned-conll02-dutch/resolve/main/sentencepiece.bpe.model'
),
'xlm-roberta-large-finetuned-conll02-spanish': (
'https://huggingface.co/xlm-roberta-large-finetuned-conll02-spanish/resolve/main/sentencepiece.bpe.model'
),
'xlm-roberta-large-finetuned-conll03-english': (
'https://huggingface.co/xlm-roberta-large-finetuned-conll03-english/resolve/main/sentencepiece.bpe.model'
),
'xlm-roberta-large-finetuned-conll03-german': (
'https://huggingface.co/xlm-roberta-large-finetuned-conll03-german/resolve/main/sentencepiece.bpe.model'
),
}
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'xlm-roberta-base': 512,
'xlm-roberta-large': 512,
'xlm-roberta-large-finetuned-conll02-dutch': 512,
'xlm-roberta-large-finetuned-conll02-spanish': 512,
'xlm-roberta-large-finetuned-conll03-english': 512,
'xlm-roberta-large-finetuned-conll03-german': 512,
}
class XLMRobertaTokenizer(PreTrainedTokenizer):
    """
    Constructs an XLM-RoBERTa tokenizer, based on SentencePiece.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    def __init__(
        self,
        vocab_file,
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        **kwargs,
    ) -> None:
        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs

        super().__init__(
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            cls_token=cls_token,
            pad_token=pad_token,
            mask_token=mask_token,
            sp_model_kwargs=self.sp_model_kwargs,
            **kwargs,
        )

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(str(vocab_file))
        self.vocab_file = vocab_file

        # Original fairseq vocab and spm vocab must be "aligned":
        # Vocab    |    0    |    1    |   2    |    3    |  4  |  5  |  6  |   7   |   8   |  9
        # -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ----
        # fairseq  | '<s>'   | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's'   | '▁de' | '-'
        # spm      | '<unk>' | '<s>'   | '</s>' | ','     | '.' | '▁' | 's' | '▁de' | '-'   | '▁a'

        # Mimic fairseq token-to-id alignment for the first 4 token
        self.fairseq_tokens_to_ids = {"<s>": 0, "<pad>": 1, "</s>": 2, "<unk>": 3}

        # The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab
        self.fairseq_offset = 1

        self.fairseq_tokens_to_ids["<mask>"] = len(self.sp_model) + self.fairseq_offset
        self.fairseq_ids_to_tokens = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        state["sp_model_proto"] = self.sp_model.serialized_model_proto()
        return state

    def __setstate__(self, d):
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.LoadFromSerializedProto(self.sp_model_proto)
    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + sep + token_ids_1 + sep

    def get_special_tokens_mask(
        self,
        token_ids_0: List[int],
        token_ids_1: Optional[List[int]] = None,
        already_has_special_tokens: bool = False,
    ) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )

        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1)) + [1]

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    @property
    def vocab_size(self):
        return len(self.sp_model) + self.fairseq_offset + 1  # Add the <mask> token

    def get_vocab(self):
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def _tokenize(self, text: str) -> List[str]:
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token):
        """Converts a token (str) in an id using the vocab."""
        if token in self.fairseq_tokens_to_ids:
            return self.fairseq_tokens_to_ids[token]
        spm_id = self.sp_model.PieceToId(token)
        # Need to return unknown token if the SP model returned 0
        return spm_id + self.fairseq_offset if spm_id else self.unk_token_id

    def _convert_id_to_token(self, index):
        """Converts an index (integer) in a token (str) using the vocab."""
        if index in self.fairseq_ids_to_tokens:
            return self.fairseq_ids_to_tokens[index]
        return self.sp_model.IdToPiece(index - self.fairseq_offset)

    def convert_tokens_to_string(self, tokens):
        """Converts a sequence of tokens (strings for sub-words) in a single string."""
        out_string = "".join(tokens).replace(SPIECE_UNDERLINE, " ").strip()
        return out_string

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        return (out_vocab_file,)
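# NOTE (added worked example): with fairseq_offset = 1, a sentencepiece id maps
# to a tokenizer id as spm_id + 1, so spm "," (id 3) becomes 4, matching the
# fairseq vocab where ids 0-3 are <s>, <pad>, </s>, <unk> and "," sits at 4.
# <mask> is appended at len(sp_model) + 1, which is why vocab_size adds + 1.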
| 263 | 0 |
"""simple docstring"""
import inspect
import tempfile
from collections import OrderedDict, UserDict
from collections.abc import MutableMapping
from contextlib import ExitStack, contextmanager
from dataclasses import fields
from enum import Enum
from typing import Any, ContextManager, List, Tuple
import numpy as np
from .import_utils import is_flax_available, is_tf_available, is_torch_available, is_torch_fx_proxy
if is_flax_available():
import jax.numpy as jnp
class cached_property(property):
    """
    Descriptor that mimics @property but caches output in a member variable.
    """

    def __get__(self, obj, objtype=None):
        # See docs.python.org/3/howto/descriptor.html#properties
        if obj is None:
            return self
        if self.fget is None:
            raise AttributeError("unreadable attribute")
        attr = "__cached_" + self.fget.__name__
        cached = getattr(obj, attr, None)
        if cached is None:
            cached = self.fget(obj)
            setattr(obj, attr, cached)
        return cached
def strtobool(val):
    """
    Convert a string representation of truth to `1` (true) or `0` (false).
    Raises ValueError if `val` is anything else.
    """
    val = val.lower()
    if val in {"y", "yes", "t", "true", "on", "1"}:
        return 1
    if val in {"n", "no", "f", "false", "off", "0"}:
        return 0
    raise ValueError(f"invalid truth value {val!r}")
def is_tensor(x):
    """
    Tests if `x` is a `torch.Tensor`, `tf.Tensor`, `jnp.ndarray` or `np.ndarray`.
    """
    if is_torch_fx_proxy(x):
        return True
    if is_torch_available():
        import torch

        if isinstance(x, torch.Tensor):
            return True
    if is_tf_available():
        import tensorflow as tf

        if isinstance(x, tf.Tensor):
            return True

    if is_flax_available():
        import jax.numpy as jnp
        from jax.core import Tracer

        if isinstance(x, (jnp.ndarray, Tracer)):
            return True

    return isinstance(x, np.ndarray)
def _is_numpy(x):
    return isinstance(x, np.ndarray)


def is_numpy_array(x):
    """Tests if `x` is a numpy array or not."""
    return _is_numpy(x)


def _is_torch(x):
    import torch

    return isinstance(x, torch.Tensor)


def is_torch_tensor(x):
    """Tests if `x` is a torch tensor or not. Safe to call even if torch is not installed."""
    return False if not is_torch_available() else _is_torch(x)


def _is_torch_device(x):
    import torch

    return isinstance(x, torch.device)


def is_torch_device(x):
    """Tests if `x` is a torch device or not. Safe to call even if torch is not installed."""
    return False if not is_torch_available() else _is_torch_device(x)


def _is_torch_dtype(x):
    import torch

    if isinstance(x, str):
        if hasattr(torch, x):
            x = getattr(torch, x)
        else:
            return False
    return isinstance(x, torch.dtype)


def is_torch_dtype(x):
    """Tests if `x` is a torch dtype or not. Safe to call even if torch is not installed."""
    return False if not is_torch_available() else _is_torch_dtype(x)


def _is_tensorflow(x):
    import tensorflow as tf

    return isinstance(x, tf.Tensor)


def is_tf_tensor(x):
    """Tests if `x` is a tensorflow tensor or not. Safe to call even if tensorflow is not installed."""
    return False if not is_tf_available() else _is_tensorflow(x)


def _is_tf_symbolic_tensor(x):
    import tensorflow as tf

    # the `is_symbolic_tensor` predicate is only available starting with TF 2.14
    if hasattr(tf, "is_symbolic_tensor"):
        return tf.is_symbolic_tensor(x)
    return type(x) == tf.Tensor


def is_tf_symbolic_tensor(x):
    """Tests if `x` is a tensorflow symbolic tensor (ie. not eager). Safe to call even if tensorflow is not installed."""
    return False if not is_tf_available() else _is_tf_symbolic_tensor(x)


def _is_jax(x):
    import jax.numpy as jnp  # noqa: F811

    return isinstance(x, jnp.ndarray)


def is_jax_tensor(x):
    """Tests if `x` is a Jax tensor or not. Safe to call even if jax is not installed."""
    return False if not is_flax_available() else _is_jax(x)
def to_py_obj(obj):
    """Convert a TensorFlow tensor, PyTorch tensor, Numpy array or python list to a python list."""
    if isinstance(obj, (dict, UserDict)):
        return {k: to_py_obj(v) for k, v in obj.items()}
    elif isinstance(obj, (list, tuple)):
        return [to_py_obj(o) for o in obj]
    elif is_tf_tensor(obj):
        return obj.numpy().tolist()
    elif is_torch_tensor(obj):
        return obj.detach().cpu().tolist()
    elif is_jax_tensor(obj):
        return np.asarray(obj).tolist()
    elif isinstance(obj, (np.ndarray, np.number)):  # tolist also works on 0d np arrays
        return obj.tolist()
    else:
        return obj


def to_numpy(obj):
    """Convert a TensorFlow tensor, PyTorch tensor, Numpy array or python list to a Numpy array."""
    if isinstance(obj, (dict, UserDict)):
        return {k: to_numpy(v) for k, v in obj.items()}
    elif isinstance(obj, (list, tuple)):
        return np.array(obj)
    elif is_tf_tensor(obj):
        return obj.numpy()
    elif is_torch_tensor(obj):
        return obj.detach().cpu().numpy()
    elif is_jax_tensor(obj):
        return np.asarray(obj)
    else:
        return obj
class ModelOutput(OrderedDict):
    """
    Base class for all model outputs as dataclass. Allows indexing by integer,
    slice or string key (which ignores `None` attributes), plus attribute access.
    """

    def __post_init__(self):
        class_fields = fields(self)

        # Safety and consistency checks
        if not len(class_fields):
            raise ValueError(f"{self.__class__.__name__} has no fields.")
        if not all(field.default is None for field in class_fields[1:]):
            raise ValueError(f"{self.__class__.__name__} should not have more than one required field.")

        first_field = getattr(self, class_fields[0].name)
        other_fields_are_none = all(getattr(self, field.name) is None for field in class_fields[1:])

        if other_fields_are_none and not is_tensor(first_field):
            if isinstance(first_field, dict):
                iterator = first_field.items()
                first_field_iterator = True
            else:
                try:
                    iterator = iter(first_field)
                    first_field_iterator = True
                except TypeError:
                    first_field_iterator = False

            # if we provided an iterator as first field and the iterator is a (key, value) iterator
            # set the associated fields
            if first_field_iterator:
                for idx, element in enumerate(iterator):
                    if (
                        not isinstance(element, (list, tuple))
                        or not len(element) == 2
                        or not isinstance(element[0], str)
                    ):
                        if idx == 0:
                            # If we do not have an iterator of key/values, set it as attribute
                            self[class_fields[0].name] = first_field
                        else:
                            # If we have a mixed iterator, raise an error
                            raise ValueError(
                                f"Cannot set key/value for {element}. It needs to be a tuple (key, value)."
                            )
                        break
                    setattr(self, element[0], element[1])
                    if element[1] is not None:
                        self[element[0]] = element[1]
        elif first_field is not None:
            self[class_fields[0].name] = first_field
        else:
            for field in class_fields:
                v = getattr(self, field.name)
                if v is not None:
                    self[field.name] = v

    def __delitem__(self, *args, **kwargs):
        raise Exception(f"You cannot use ``__delitem__`` on a {self.__class__.__name__} instance.")

    def setdefault(self, *args, **kwargs):
        raise Exception(f"You cannot use ``setdefault`` on a {self.__class__.__name__} instance.")

    def pop(self, *args, **kwargs):
        raise Exception(f"You cannot use ``pop`` on a {self.__class__.__name__} instance.")

    def update(self, *args, **kwargs):
        raise Exception(f"You cannot use ``update`` on a {self.__class__.__name__} instance.")

    def __getitem__(self, k):
        if isinstance(k, str):
            inner_dict = dict(self.items())
            return inner_dict[k]
        else:
            return self.to_tuple()[k]

    def __setattr__(self, name, value):
        if name in self.keys() and value is not None:
            # Don't call self.__setitem__ to avoid recursion errors
            super().__setitem__(name, value)
        super().__setattr__(name, value)

    def __setitem__(self, key, value):
        # Will raise a KeyException if needed
        super().__setitem__(key, value)
        # Don't call self.__setattr__ to avoid recursion errors
        super().__setattr__(key, value)

    def to_tuple(self):
        return tuple(self[k] for k in self.keys())
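# NOTE (added usage sketch): a ModelOutput subclass supports dict-style,
# attribute-style and tuple-style access interchangeably. For a hypothetical
# dataclass subclass with fields `loss` and `logits`, both set:
#
#   outputs["logits"] is outputs.logits   # same tensor
#   outputs.to_tuple()                    # (loss, logits); None fields are skipped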
class ExplicitEnum(str, Enum):
    """
    Enum with a more explicit error message for missing values.
    """

    @classmethod
    def _missing_(cls, value):
        raise ValueError(
            f"{value} is not a valid {cls.__name__}, please select one of {list(cls._value2member_map_.keys())}"
        )


class PaddingStrategy(ExplicitEnum):
    """
    Possible values for the `padding` argument of tokenizers.
    """

    LONGEST = "longest"
    MAX_LENGTH = "max_length"
    DO_NOT_PAD = "do_not_pad"


class TensorType(ExplicitEnum):
    """
    Possible values for the `return_tensors` argument.
    """

    PYTORCH = "pt"
    TENSORFLOW = "tf"
    NUMPY = "np"
    JAX = "jax"
class ContextManagers:
    """
    Wrapper for `contextlib.ExitStack` which enters a collection of context managers.
    """

    def __init__(self, context_managers: List[ContextManager]):
        self.context_managers = context_managers
        self.stack = ExitStack()

    def __enter__(self):
        for context_manager in self.context_managers:
            self.stack.enter_context(context_manager)

    def __exit__(self, *args, **kwargs):
        self.stack.__exit__(*args, **kwargs)
def can_return_loss(model_class):
    """
    Check if a given model can return loss.
    """
    framework = infer_framework(model_class)
    if framework == "tf":
        signature = inspect.signature(model_class.call)  # TensorFlow models
    elif framework == "pt":
        signature = inspect.signature(model_class.forward)  # PyTorch models
    else:
        signature = inspect.signature(model_class.__call__)  # Flax models

    for p in signature.parameters:
        if p == "return_loss" and signature.parameters[p].default is True:
            return True

    return False


def find_labels(model_class):
    """
    Find the labels used by a given model.
    """
    model_name = model_class.__name__
    framework = infer_framework(model_class)
    if framework == "tf":
        signature = inspect.signature(model_class.call)  # TensorFlow models
    elif framework == "pt":
        signature = inspect.signature(model_class.forward)  # PyTorch models
    else:
        signature = inspect.signature(model_class.__call__)  # Flax models

    if "QuestionAnswering" in model_name:
        return [p for p in signature.parameters if "label" in p or p in ("start_positions", "end_positions")]
    else:
        return [p for p in signature.parameters if "label" in p]
def flatten_dict(d: MutableMapping, parent_key: str = "", delimiter: str = "."):
    """Flatten a nested dict into a single-level dict."""

    def _flatten_dict(d, parent_key="", delimiter="."):
        for k, v in d.items():
            key = str(parent_key) + delimiter + str(k) if parent_key else k
            if v and isinstance(v, MutableMapping):
                yield from flatten_dict(v, key, delimiter=delimiter).items()
            else:
                yield key, v

    return dict(_flatten_dict(d, parent_key, delimiter))
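# NOTE (added worked example): flatten_dict({"a": {"b": 1, "c": {"d": 2}}})
# yields {"a.b": 1, "a.c.d": 2}; a custom delimiter replaces the "." joins.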
@contextmanager
def working_or_temp_dir(working_dir, use_temp_dir: bool = False):
    if use_temp_dir:
        with tempfile.TemporaryDirectory() as tmp_dir:
            yield tmp_dir
    else:
        yield working_dir
def UpperCamelCase_ ( lowerCAmelCase__ : int , lowerCAmelCase__ : Any=None ) -> Dict:
"""simple docstring"""
if is_numpy_array(lowerCAmelCase__ ):
return np.transpose(lowerCAmelCase__ , axes=lowerCAmelCase__ )
elif is_torch_tensor(lowerCAmelCase__ ):
return array.T if axes is None else array.permute(*lowerCAmelCase__ )
elif is_tf_tensor(lowerCAmelCase__ ):
import tensorflow as tf
return tf.transpose(lowerCAmelCase__ , perm=lowerCAmelCase__ )
elif is_jax_tensor(lowerCAmelCase__ ):
return jnp.transpose(lowerCAmelCase__ , axes=lowerCAmelCase__ )
else:
raise ValueError(f"Type not supported for transpose: {type(lowerCAmelCase__ )}." )
def UpperCamelCase_ ( lowerCAmelCase__ : str , lowerCAmelCase__ : str ) -> List[Any]:
"""simple docstring"""
if is_numpy_array(lowerCAmelCase__ ):
return np.reshape(lowerCAmelCase__ , lowerCAmelCase__ )
elif is_torch_tensor(lowerCAmelCase__ ):
return array.reshape(*lowerCAmelCase__ )
elif is_tf_tensor(lowerCAmelCase__ ):
import tensorflow as tf
return tf.reshape(lowerCAmelCase__ , lowerCAmelCase__ )
elif is_jax_tensor(lowerCAmelCase__ ):
return jnp.reshape(lowerCAmelCase__ , lowerCAmelCase__ )
else:
raise ValueError(f"Type not supported for reshape: {type(lowerCAmelCase__ )}." )
def UpperCamelCase_ ( lowerCAmelCase__ : Union[str, Any] , lowerCAmelCase__ : Tuple=None ) -> str:
"""simple docstring"""
if is_numpy_array(lowerCAmelCase__ ):
return np.squeeze(lowerCAmelCase__ , axis=lowerCAmelCase__ )
elif is_torch_tensor(lowerCAmelCase__ ):
return array.squeeze() if axis is None else array.squeeze(dim=lowerCAmelCase__ )
elif is_tf_tensor(lowerCAmelCase__ ):
import tensorflow as tf
return tf.squeeze(lowerCAmelCase__ , axis=lowerCAmelCase__ )
elif is_jax_tensor(lowerCAmelCase__ ):
return jnp.squeeze(lowerCAmelCase__ , axis=lowerCAmelCase__ )
else:
raise ValueError(f"Type not supported for squeeze: {type(lowerCAmelCase__ )}." )
def expand_dims(array, axis):
    """Framework-agnostic expand_dims that works on numpy/torch/tf/jax tensors."""
    if is_numpy_array(array):
        return np.expand_dims(array, axis)
    elif is_torch_tensor(array):
        return array.unsqueeze(dim=axis)
    elif is_tf_tensor(array):
        import tensorflow as tf

        return tf.expand_dims(array, axis=axis)
    elif is_jax_tensor(array):
        return jnp.expand_dims(array, axis=axis)
    else:
        raise ValueError(f"Type not supported for expand_dims: {type(array)}.")
def tensor_size(array):
    """Framework-agnostic element count that works on numpy/torch/tf/jax tensors."""
    if is_numpy_array(array):
        return np.size(array)
    elif is_torch_tensor(array):
        return array.numel()
    elif is_tf_tensor(array):
        import tensorflow as tf

        return tf.size(array)
    elif is_jax_tensor(array):
        return array.size
    else:
        raise ValueError(f"Type not supported for tensor_size: {type(array)}.")
def add_model_info_to_auto_map(auto_map, repo_id):
    """Prefix each entry of an `auto_map` with `repo_id` unless it is already namespaced."""
    for key, value in auto_map.items():
        if isinstance(value, (tuple, list)):
            auto_map[key] = [f"{repo_id}--{v}" if (v is not None and "--" not in v) else v for v in value]
        elif value is not None and "--" not in value:
            auto_map[key] = f"{repo_id}--{value}"
    return auto_map
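# Illustrative (names are examples): with repo_id "user/repo",
# {"AutoModel": "modeling.MyModel"} becomes {"AutoModel": "user/repo--modeling.MyModel"}.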
def infer_framework(model_class):
    """Infer "tf", "pt" or "flax" from a model class by walking its MRO."""
    for base_class in inspect.getmro(model_class):
        module = base_class.__module__
        name = base_class.__name__
        if module.startswith("tensorflow") or module.startswith("keras") or name == "TFPreTrainedModel":
            return "tf"
        elif module.startswith("torch") or name == "PreTrainedModel":
            return "pt"
        elif module.startswith("flax") or module.startswith("jax") or name == "FlaxPreTrainedModel":
            return "flax"
    raise TypeError(f"Could not infer framework from class {model_class}.")
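# A minimal sanity check of the wrappers above (illustrative, not part of the
# original module); only the NumPy branch is exercised, using the module-level
# `np` import.
if __name__ == "__main__":
    _x = np.ones((2, 1, 3))
    assert transpose(_x).shape == (3, 1, 2)
    assert reshape(_x, (3, 2)).shape == (3, 2)
    assert squeeze(_x, axis=1).shape == (2, 3)
    assert expand_dims(_x, 0).shape == (1, 2, 1, 3)
    assert tensor_size(_x) == 6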
| 289 |
"""simple docstring"""
from __future__ import annotations
def prime_factors(n: int) -> list[int]:
    """Return the prime factorisation of `n` as a sorted list of factors."""
    i = 2
    factors = []
    while i * i <= n:
        if n % i:
            i += 1
        else:
            n //= i
            factors.append(i)
    if n > 1:
        factors.append(n)
    return factors
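# Doctest-style examples (illustrative; they could live in the docstring so
# the testmod() call below would pick them up):
#   >>> prime_factors(360)
#   [2, 2, 2, 3, 3, 5]
#   >>> prime_factors(97)
#   [97]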
if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 289 | 1 |
"""simple docstring"""
import argparse

import torch
from omegaconf import OmegaConf

from diffusers import DDIMScheduler, LDMPipeline, UNetLDMModel, VQModel


def convert_ldm_original(checkpoint_path: str, config_path: str, output_path: str):
    config = OmegaConf.load(config_path)
    state_dict = torch.load(checkpoint_path, map_location="cpu")["model"]
    keys = list(state_dict.keys())

    # extract state_dict for VQVAE
    first_stage_dict = {}
    first_stage_key = "first_stage_model."
    for key in keys:
        if key.startswith(first_stage_key):
            first_stage_dict[key.replace(first_stage_key, "")] = state_dict[key]

    # extract state_dict for UNetLDM
    unet_state_dict = {}
    unet_key = "model.diffusion_model."
    for key in keys:
        if key.startswith(unet_key):
            unet_state_dict[key.replace(unet_key, "")] = state_dict[key]

    vqvae_init_args = config.model.params.first_stage_config.params
    unet_init_args = config.model.params.unet_config.params

    vqvae = VQModel(**vqvae_init_args).eval()
    vqvae.load_state_dict(first_stage_dict)

    unet = UNetLDMModel(**unet_init_args).eval()
    unet.load_state_dict(unet_state_dict)

    noise_scheduler = DDIMScheduler(
        timesteps=config.model.params.timesteps,
        beta_schedule="scaled_linear",
        beta_start=config.model.params.linear_start,
        beta_end=config.model.params.linear_end,
        clip_sample=False,
    )

    pipeline = LDMPipeline(vqvae, unet, noise_scheduler)
    pipeline.save_pretrained(output_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--checkpoint_path", type=str, required=True)
    parser.add_argument("--config_path", type=str, required=True)
    parser.add_argument("--output_path", type=str, required=True)
    args = parser.parse_args()
    convert_ldm_original(args.checkpoint_path, args.config_path, args.output_path)
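# Example invocation (script and file names are placeholders):
#   python convert_ldm_original_checkpoint.py --checkpoint_path model.ckpt \
#       --config_path config.yaml --output_path ./ldm_pipeline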
| 202 |
"""simple docstring"""
import unittest
from transformers import DebertaVaTokenizer, DebertaVaTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
_A : int = get_tests_dir("""fixtures/spiece.model""")
@require_sentencepiece
@require_tokenizers
class a__ ( a_, unittest.TestCase ):
__lowerCAmelCase = DebertaVaTokenizer
__lowerCAmelCase = DebertaVaTokenizerFast
__lowerCAmelCase = True
__lowerCAmelCase = True
def __magic_name__ ( self ):
super().setUp()
# We have a SentencePiece fixture for testing
lowercase : Any = DebertaVaTokenizer(_a , unk_token="<unk>" )
tokenizer.save_pretrained(self.tmpdirname )
def __magic_name__ ( self , _a ):
lowercase : int = "this is a test"
lowercase : Tuple = "this is a test"
return input_text, output_text
def __magic_name__ ( self ):
lowercase : List[Any] = "<pad>"
lowercase : List[str] = 0
self.assertEqual(self.get_tokenizer()._convert_token_to_id(_a ) , _a )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(_a ) , _a )
def __magic_name__ ( self ):
lowercase : int = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , "<pad>" )
self.assertEqual(vocab_keys[1] , "<unk>" )
self.assertEqual(vocab_keys[-1] , "[PAD]" )
self.assertEqual(len(_a ) , 30_001 )
def __magic_name__ ( self ):
self.assertEqual(self.get_tokenizer().vocab_size , 30_000 )
def __magic_name__ ( self ):
# fmt: off
lowercase : List[str] = " \tHeLLo!how \n Are yoU? "
lowercase : str = ["▁hello", "!", "how", "▁are", "▁you", "?"]
# fmt: on
lowercase : Union[str, Any] = DebertaVaTokenizer(_a , do_lower_case=_a )
lowercase : Any = tokenizer.convert_ids_to_tokens(tokenizer.encode(_a , add_special_tokens=_a ) )
self.assertListEqual(_a , _a )
lowercase : str = DebertaVaTokenizerFast(_a , do_lower_case=_a )
lowercase : Optional[Any] = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(_a , add_special_tokens=_a ) )
self.assertListEqual(_a , _a )
@unittest.skip("There is an inconsistency between slow and fast tokenizer due to a bug in the fast one." )
def __magic_name__ ( self ):
pass
@unittest.skip("There is an inconsistency between slow and fast tokenizer due to a bug in the fast one." )
def __magic_name__ ( self ):
pass
def __magic_name__ ( self ):
# fmt: off
lowercase : Optional[Any] = "I was born in 92000, and this is falsé."
lowercase : Tuple = ["▁", "<unk>", "▁was", "▁born", "▁in", "▁9", "2000", "▁", ",", "▁and", "▁this", "▁is", "▁fal", "s", "<unk>", "▁", ".", ]
# fmt: on
lowercase : List[Any] = DebertaVaTokenizer(_a , split_by_punct=_a )
lowercase : Tuple = tokenizer.convert_ids_to_tokens(tokenizer.encode(_a , add_special_tokens=_a ) )
self.assertListEqual(_a , _a )
lowercase : Union[str, Any] = DebertaVaTokenizerFast(_a , split_by_punct=_a )
lowercase : Any = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(_a , add_special_tokens=_a ) )
self.assertListEqual(_a , _a )
def __magic_name__ ( self ):
# fmt: off
lowercase : int = "I was born in 92000, and this is falsé."
lowercase : Tuple = ["▁i", "▁was", "▁born", "▁in", "▁9", "2000", "▁", ",", "▁and", "▁this", "▁is", "▁fal", "s", "<unk>", "▁", ".", ]
# fmt: on
lowercase : List[str] = DebertaVaTokenizer(_a , do_lower_case=_a , split_by_punct=_a )
lowercase : Optional[Any] = tokenizer.convert_ids_to_tokens(tokenizer.encode(_a , add_special_tokens=_a ) )
self.assertListEqual(_a , _a )
lowercase : Union[str, Any] = DebertaVaTokenizerFast(_a , do_lower_case=_a , split_by_punct=_a )
lowercase : Optional[int] = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(_a , add_special_tokens=_a ) )
self.assertListEqual(_a , _a )
def __magic_name__ ( self ):
# fmt: off
lowercase : List[Any] = "I was born in 92000, and this is falsé."
lowercase : Optional[int] = ["▁i", "▁was", "▁born", "▁in", "▁9", "2000", ",", "▁and", "▁this", "▁is", "▁fal", "s", "<unk>", ".", ]
# fmt: on
lowercase : List[Any] = DebertaVaTokenizer(_a , do_lower_case=_a , split_by_punct=_a )
lowercase : Optional[int] = tokenizer.convert_ids_to_tokens(tokenizer.encode(_a , add_special_tokens=_a ) )
self.assertListEqual(_a , _a )
lowercase : Optional[int] = DebertaVaTokenizerFast(_a , do_lower_case=_a , split_by_punct=_a )
lowercase : Dict = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(_a , add_special_tokens=_a ) )
self.assertListEqual(_a , _a )
def __magic_name__ ( self ):
# fmt: off
lowercase : int = "I was born in 92000, and this is falsé."
lowercase : Dict = ["▁", "<unk>", "▁was", "▁born", "▁in", "▁9", "2000", "▁", ",", "▁and", "▁this", "▁is", "▁fal", "s", "<unk>", "▁", ".", ]
# fmt: on
lowercase : Union[str, Any] = DebertaVaTokenizer(_a , do_lower_case=_a , split_by_punct=_a )
lowercase : Any = tokenizer.convert_ids_to_tokens(tokenizer.encode(_a , add_special_tokens=_a ) )
self.assertListEqual(_a , _a )
lowercase : Union[str, Any] = DebertaVaTokenizerFast(_a , do_lower_case=_a , split_by_punct=_a )
lowercase : Dict = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(_a , add_special_tokens=_a ) )
self.assertListEqual(_a , _a )
def __magic_name__ ( self ):
# fmt: off
lowercase : Dict = " \tHeLLo!how \n Are yoU? "
lowercase : str = ["▁", "<unk>", "e", "<unk>", "o", "!", "how", "▁", "<unk>", "re", "▁yo", "<unk>", "?"]
# fmt: on
lowercase : Optional[int] = DebertaVaTokenizer(_a , do_lower_case=_a , split_by_punct=_a )
lowercase : int = tokenizer.convert_ids_to_tokens(tokenizer.encode(_a , add_special_tokens=_a ) )
self.assertListEqual(_a , _a )
lowercase : List[str] = DebertaVaTokenizerFast(_a , do_lower_case=_a , split_by_punct=_a )
lowercase : List[Any] = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(_a , add_special_tokens=_a ) )
self.assertListEqual(_a , _a )
def __magic_name__ ( self ):
lowercase : str = self.get_tokenizer()
lowercase : Dict = self.get_rust_tokenizer()
lowercase : str = "I was born in 92000, and this is falsé."
lowercase : Optional[Any] = tokenizer.convert_ids_to_tokens(tokenizer.encode(_a , add_special_tokens=_a ) )
lowercase : Dict = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(_a , add_special_tokens=_a ) )
self.assertListEqual(_a , _a )
lowercase : str = tokenizer.encode(_a , add_special_tokens=_a )
lowercase : Dict = rust_tokenizer.encode(_a , add_special_tokens=_a )
self.assertListEqual(_a , _a )
lowercase : Optional[int] = self.get_rust_tokenizer()
lowercase : Tuple = tokenizer.encode(_a )
lowercase : List[str] = rust_tokenizer.encode(_a )
self.assertListEqual(_a , _a )
def __magic_name__ ( self ):
lowercase : str = "This is a test"
lowercase : Tuple = [13, 1, 4_398, 25, 21, 1_289]
lowercase : Optional[int] = ["▁", "T", "his", "▁is", "▁a", "▁test"]
lowercase : Optional[Any] = ["▁", "<unk>", "his", "▁is", "▁a", "▁test"]
lowercase : Any = DebertaVaTokenizer(_a , keep_accents=_a )
lowercase : Dict = DebertaVaTokenizerFast(_a , keep_accents=_a )
lowercase : str = tokenizer.encode(_a , add_special_tokens=_a )
self.assertListEqual(_a , _a )
lowercase : str = tokenizer.tokenize(_a )
self.assertListEqual(_a , _a )
lowercase : str = tokenizer.convert_ids_to_tokens(_a )
self.assertListEqual(_a , _a )
lowercase : int = rust_tokenizer.encode(_a , add_special_tokens=_a )
self.assertListEqual(_a , _a )
lowercase : Any = rust_tokenizer.tokenize(_a )
self.assertListEqual(_a , _a )
lowercase : Union[str, Any] = rust_tokenizer.convert_ids_to_tokens(_a )
self.assertListEqual(_a , _a )
# fmt: off
lowercase : int = "I was born in 92000, and this is falsé."
lowercase : Any = [13, 1, 23, 386, 19, 561, 3_050, 15, 17, 48, 25, 8_256, 18, 1, 9]
lowercase : List[str] = ["▁", "I", "▁was", "▁born", "▁in", "▁9", "2000", ",", "▁and", "▁this", "▁is", "▁fal", "s", "é", ".", ]
lowercase : str = ["▁", "<unk>", "▁was", "▁born", "▁in", "▁9", "2000", ",", "▁and", "▁this", "▁is", "▁fal", "s", "<unk>", ".", ]
# fmt: on
lowercase : Tuple = tokenizer.encode(_a , add_special_tokens=_a )
self.assertListEqual(_a , _a )
lowercase : List[Any] = tokenizer.tokenize(_a )
self.assertListEqual(_a , _a )
lowercase : str = tokenizer.convert_ids_to_tokens(_a )
self.assertListEqual(_a , _a )
lowercase : Optional[int] = rust_tokenizer.encode(_a , add_special_tokens=_a )
self.assertListEqual(_a , _a )
lowercase : List[Any] = rust_tokenizer.tokenize(_a )
self.assertListEqual(_a , _a )
lowercase : List[Any] = rust_tokenizer.convert_ids_to_tokens(_a )
self.assertListEqual(_a , _a )
def __magic_name__ ( self ):
lowercase : Optional[int] = DebertaVaTokenizer(_a )
lowercase : List[Any] = tokenizer.encode("sequence builders" )
lowercase : Dict = tokenizer.encode("multi-sequence build" )
lowercase : List[Any] = tokenizer.build_inputs_with_special_tokens(_a )
lowercase : Dict = tokenizer.build_inputs_with_special_tokens(_a , _a )
self.assertEqual([tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] , _a )
self.assertEqual(
[tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] + text_a + [tokenizer.sep_token_id] , _a , )
@slow
def __magic_name__ ( self ):
# fmt: off
lowercase : Dict = {"input_ids": [[1, 39_867, 36, 19_390, 486, 27, 35_052, 81_436, 18, 60_685, 1_225, 7, 35_052, 81_436, 18, 9_367, 16_899, 18, 15_937, 53, 594, 773, 18, 16_287, 30_465, 36, 15_937, 6, 41_139, 38, 36_979, 60_763, 191, 6, 34_132, 99, 6, 50_538, 390, 43_230, 6, 34_132, 2_779, 20_850, 14, 699, 1_072, 1_194, 36, 382, 10_901, 53, 7, 699, 1_072, 2_084, 36, 20_422, 630, 53, 19, 105, 3_049, 1_896, 1_053, 16_899, 1_506, 11, 37_978, 4_243, 7, 1_237, 31_869, 200, 16_566, 654, 6, 35_052, 81_436, 7, 55_630, 13_593, 4, 2], [1, 26, 15_011, 13, 667, 8, 1_053, 18, 23_611, 1_237, 72_356, 12_820, 34, 104_134, 1_209, 35, 13_313, 6_627, 21, 202, 347, 7, 164, 2_399, 11, 46, 4_485, 4, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 5, 1_232, 2_864, 15_785, 14_951, 105, 5, 8_581, 1_250, 4, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], "token_type_ids": [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=_a , model_name="microsoft/deberta-v2-xlarge" , revision="ad6e42c1532ddf3a15c39246b63f5559d558b670" , )
| 202 | 1 |
'''simple docstring'''
import csv
from collections import defaultdict
from dataclasses import dataclass, field
from typing import List, Optional
import matplotlib.pyplot as plt
import numpy as np
from matplotlib.ticker import ScalarFormatter
from transformers import HfArgumentParser
def list_field(default=None, metadata=None):
    # a list default must go through default_factory so each dataclass instance gets its own copy
    return field(default_factory=lambda: default, metadata=metadata)
@dataclass
class PlotArguments:
    csv_file: str = field(
        metadata={"help": "The csv file to plot."},
    )
    plot_along_batch: bool = field(
        default=False,
        metadata={"help": "Whether to plot along batch size or sequence length. Defaults to sequence length."},
    )
    is_time: bool = field(
        default=False,
        metadata={"help": "Whether the csv file has time results or memory results. Defaults to memory results."},
    )
    no_log_scale: bool = field(
        default=False,
        metadata={"help": "Disable logarithmic scale when plotting"},
    )
    is_train: bool = field(
        default=False,
        metadata={
            "help": "Whether the csv file has training results or inference results. Defaults to inference results."
        },
    )
    figure_png_file: Optional[str] = field(
        default=None,
        metadata={"help": "Filename under which the plot will be saved. If unused no plot is saved."},
    )
    short_model_names: Optional[List[str]] = list_field(
        default=None, metadata={"help": "List of model names that are used instead of the ones in the csv file."}
    )
def can_convert_to_int(string):
    try:
        int(string)
        return True
    except ValueError:
        return False


def can_convert_to_float(string):
    try:
        float(string)
        return True
    except ValueError:
        return False
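# Illustrative: can_convert_to_int("42") is True, can_convert_to_int("1.5") is
# False (int("1.5") raises ValueError), while can_convert_to_float("1.5") is True.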
class Plot:
    def __init__(self, args):
        self.args = args
        self.result_dict = defaultdict(lambda: {"bsz": [], "seq_len": [], "result": {}})

        with open(self.args.csv_file, newline="") as csv_file:
            reader = csv.DictReader(csv_file)
            for row in reader:
                model_name = row["model"]
                self.result_dict[model_name]["bsz"].append(int(row["batch_size"]))
                self.result_dict[model_name]["seq_len"].append(int(row["sequence_length"]))
                if can_convert_to_int(row["result"]):
                    # value is not None
                    self.result_dict[model_name]["result"][
                        (int(row["batch_size"]), int(row["sequence_length"]))
                    ] = int(row["result"])
                elif can_convert_to_float(row["result"]):
                    # value is not None
                    self.result_dict[model_name]["result"][
                        (int(row["batch_size"]), int(row["sequence_length"]))
                    ] = float(row["result"])

    def plot(self):
        fig, ax = plt.subplots()
        title_str = "Time usage" if self.args.is_time else "Memory usage"
        title_str = title_str + " for training" if self.args.is_train else title_str + " for inference"

        if not self.args.no_log_scale:
            # set logarithm scales
            ax.set_xscale("log")
            ax.set_yscale("log")

        for axis in [ax.xaxis, ax.yaxis]:
            axis.set_major_formatter(ScalarFormatter())

        for model_name_idx, model_name in enumerate(self.result_dict.keys()):
            batch_sizes = sorted(set(self.result_dict[model_name]["bsz"]))
            sequence_lengths = sorted(set(self.result_dict[model_name]["seq_len"]))
            results = self.result_dict[model_name]["result"]

            (x_axis_array, inner_loop_array) = (
                (batch_sizes, sequence_lengths) if self.args.plot_along_batch else (sequence_lengths, batch_sizes)
            )

            label_model_name = (
                model_name if self.args.short_model_names is None else self.args.short_model_names[model_name_idx]
            )

            for inner_loop_value in inner_loop_array:
                if self.args.plot_along_batch:
                    y_axis_array = np.asarray(
                        [results[(x, inner_loop_value)] for x in x_axis_array if (x, inner_loop_value) in results],
                        dtype=int,
                    )
                else:
                    y_axis_array = np.asarray(
                        [results[(inner_loop_value, x)] for x in x_axis_array if (inner_loop_value, x) in results],
                        dtype=np.float32,
                    )

                (x_axis_label, inner_loop_label) = (
                    ("batch_size", "len") if self.args.plot_along_batch else ("in #tokens", "bsz")
                )

                x_axis_array = np.asarray(x_axis_array, int)[: len(y_axis_array)]
                plt.scatter(
                    x_axis_array, y_axis_array, label=f"{label_model_name} - {inner_loop_label}: {inner_loop_value}"
                )
                plt.plot(x_axis_array, y_axis_array, "--")

            title_str += f" {label_model_name} vs."

        title_str = title_str[:-4]
        y_axis_label = "Time in s" if self.args.is_time else "Memory in MB"

        # plot
        plt.title(title_str)
        plt.xlabel(x_axis_label)
        plt.ylabel(y_axis_label)
        plt.legend()

        if self.args.figure_png_file is not None:
            plt.savefig(self.args.figure_png_file)
        else:
            plt.show()
def main():
    parser = HfArgumentParser(PlotArguments)
    plot_args = parser.parse_args_into_dataclasses()[0]
    plot = Plot(args=plot_args)
    plot.plot()


if __name__ == "__main__":
    main()
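# Example invocation (file names are placeholders; the csv is expected to have
# model/batch_size/sequence_length/result columns, as read in Plot.__init__):
#   python plot_csv_file.py --csv_file results.csv --figure_png_file plot.png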
| 357 |
'''simple docstring'''
import copy
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import ClassLabel, Features, Value
from .base import TaskTemplate
@dataclass(frozen=True)
class TextClassification(TaskTemplate):
    # `task` is kept in asdict() output even when it equals the default
    task: str = field(default="text-classification", metadata={"include_in_asdict_even_if_is_default": True})
    input_schema: ClassVar[Features] = Features({"text": Value("string")})
    label_schema: ClassVar[Features] = Features({"labels": ClassLabel})
    text_column: str = "text"
    label_column: str = "labels"

    def align_with_features(self, features):
        if self.label_column not in features:
            raise ValueError(f"Column {self.label_column} is not present in features.")
        if not isinstance(features[self.label_column], ClassLabel):
            raise ValueError(f"Column {self.label_column} is not a ClassLabel.")
        task_template = copy.deepcopy(self)
        label_schema = self.label_schema.copy()
        label_schema["labels"] = features[self.label_column]
        # the dataclass is frozen, so write through __dict__
        task_template.__dict__["label_schema"] = label_schema
        return task_template

    @property
    def column_mapping(self) -> Dict[str, str]:
        return {
            self.text_column: "text",
            self.label_column: "labels",
        }
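# Illustrative usage (not in the original file): aligning with a dataset's
# features pins the template's label schema to that dataset's concrete ClassLabel.
#   >>> features = Features({"text": Value("string"), "labels": ClassLabel(names=["neg", "pos"])})
#   >>> template = TextClassification(text_column="text", label_column="labels")
#   >>> aligned = template.align_with_features(features)  # aligned.label_schema["labels"] now has 2 classes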
| 246 | 0 |
import unittest
import numpy as np
from transformers import is_flax_available
from transformers.testing_utils import require_flax
from ..test_modeling_flax_common import ids_tensor
if is_flax_available():
import jax
import jax.numpy as jnp
from transformers.generation import (
FlaxForcedBOSTokenLogitsProcessor,
FlaxForcedEOSTokenLogitsProcessor,
FlaxLogitsProcessorList,
FlaxMinLengthLogitsProcessor,
FlaxTemperatureLogitsWarper,
FlaxTopKLogitsWarper,
FlaxTopPLogitsWarper,
)
@require_flax
class __lowercase (unittest.TestCase ):
"""simple docstring"""
def UpperCAmelCase ( self , A , A ) -> Tuple:
snake_case : Tuple = jnp.ones((batch_size, length) ) / length
return scores
def UpperCAmelCase ( self ) -> Tuple:
snake_case : List[str] = None
snake_case : Any = 2_0
snake_case : int = self._get_uniform_logits(batch_size=2 , length=A )
# tweak scores to not be uniform anymore
snake_case : Optional[int] = scores.at[1, 5].set((1 / length) + 0.1 ) # peak, 1st batch
snake_case : Optional[Any] = scores.at[1, 1_0].set((1 / length) - 0.4 ) # valley, 1st batch
# compute softmax
snake_case : int = jax.nn.softmax(A , axis=-1 )
snake_case : int = FlaxTemperatureLogitsWarper(temperature=0.5 )
snake_case : Tuple = FlaxTemperatureLogitsWarper(temperature=1.3 )
snake_case : Union[str, Any] = jax.nn.softmax(temp_dist_warper_sharper(A , scores.copy() , cur_len=A ) , axis=-1 )
snake_case : str = jax.nn.softmax(temp_dist_warper_smoother(A , scores.copy() , cur_len=A ) , axis=-1 )
# uniform distribution stays uniform
self.assertTrue(jnp.allclose(probs[0, :] , warped_prob_sharp[0, :] , atol=1e-3 ) )
self.assertTrue(jnp.allclose(probs[0, :] , warped_prob_smooth[0, :] , atol=1e-3 ) )
# sharp peaks get higher, valleys get lower
self.assertLess(probs[1, :].max() , warped_prob_sharp[1, :].max() )
self.assertGreater(probs[1, :].min() , warped_prob_sharp[1, :].min() )
# smooth peaks get lower, valleys get higher
self.assertGreater(probs[1, :].max() , warped_prob_smooth[1, :].max() )
self.assertLess(probs[1, :].min() , warped_prob_smooth[1, :].min() )
def UpperCAmelCase ( self ) -> str:
snake_case : Any = None
snake_case : List[str] = 1_0
snake_case : Optional[int] = 2
# create ramp distribution
snake_case : int = np.broadcast_to(np.arange(A )[None, :] , (batch_size, vocab_size) ).copy()
snake_case : Dict = ramp_logits[1:, : vocab_size // 2] + vocab_size
snake_case : Optional[int] = FlaxTopKLogitsWarper(3 )
snake_case : Optional[Any] = top_k_warp(A , A , cur_len=A )
# check that correct tokens are filtered
self.assertListEqual(jnp.isinf(scores[0] ).tolist() , 7 * [True] + 3 * [False] )
self.assertListEqual(jnp.isinf(scores[1] ).tolist() , 2 * [True] + 3 * [False] + 5 * [True] )
# check special case
snake_case : List[Any] = 5
snake_case : int = FlaxTopKLogitsWarper(top_k=1 , filter_value=0.0 , min_tokens_to_keep=3 )
snake_case : Union[str, Any] = np.broadcast_to(np.arange(A )[None, :] , (batch_size, length) ).copy()
snake_case : Optional[int] = top_k_warp_safety_check(A , A , cur_len=A )
# min_tokens overwrites k: 3 tokens are kept => 2 tokens are nullified
self.assertListEqual((scores == 0.0).sum(axis=-1 ).tolist() , [2, 2] )
def UpperCAmelCase ( self ) -> Optional[Any]:
snake_case : Dict = None
snake_case : Tuple = 1_0
snake_case : Dict = 2
# create distribution and take log (inverse to Softmax as taken in TopPLogitsWarper)
snake_case : List[str] = np.log(np.array([[0.3, 0.1, 0.1, 0.5], [0.15, 0.3, 0.3, 0.25]] ) )
snake_case : Any = FlaxTopPLogitsWarper(0.8 )
snake_case : Optional[int] = np.exp(top_p_warp(A , A , cur_len=A ) )
# dist should be filtered to keep min num values so that sum is >= top_p
# exp (-inf) => 0
snake_case : Dict = np.array([[0.3, 0.0, 0.0, 0.5], [0.0, 0.3, 0.3, 0.25]] )
self.assertTrue(np.allclose(A , A , atol=1e-3 ) )
# check edge cases with negative and extreme logits
snake_case : List[Any] = np.broadcast_to(np.arange(A )[None, :] , (batch_size, vocab_size) ).copy() - (
vocab_size // 2
)
# make ramp_logits more extreme
snake_case : str = ramp_logits[1] * 1_00.0
# make sure at least 2 tokens are kept
snake_case : Optional[Any] = FlaxTopPLogitsWarper(0.9 , min_tokens_to_keep=2 , filter_value=0.0 )
snake_case : Optional[int] = top_p_warp(A , A , cur_len=A )
# first batch should keep three tokens, second batch would keep only 1, but due to `min_tokens_to_keep=2` keeps 2.
self.assertListEqual((filtered_dist != 0.0).sum(axis=-1 ).tolist() , [3, 2] )
def UpperCAmelCase ( self ) -> Tuple:
snake_case : Union[str, Any] = 2_0
snake_case : Union[str, Any] = 4
snake_case : Tuple = 0
snake_case : str = FlaxMinLengthLogitsProcessor(min_length=1_0 , eos_token_id=A )
# check that min length is applied at length 5
snake_case : Optional[int] = ids_tensor((batch_size, 2_0) , vocab_size=2_0 )
snake_case : Union[str, Any] = 5
snake_case : Tuple = self._get_uniform_logits(A , A )
snake_case : Dict = min_dist_processor(A , A , cur_len=A )
self.assertListEqual(scores_before_min_length[:, eos_token_id].tolist() , 4 * [-float("""inf""" )] )
# check that min length is not applied anymore at length 15
snake_case : str = self._get_uniform_logits(A , A )
snake_case : Dict = 1_5
snake_case : Tuple = min_dist_processor(A , A , cur_len=A )
self.assertFalse(jnp.isinf(A ).any() )
def UpperCAmelCase ( self ) -> List[Any]:
snake_case : Union[str, Any] = 2_0
snake_case : str = 4
snake_case : List[str] = 0
snake_case : Tuple = FlaxForcedBOSTokenLogitsProcessor(bos_token_id=A )
# check that all scores are -inf except the bos_token_id score
snake_case : List[Any] = ids_tensor((batch_size, 1) , vocab_size=2_0 )
snake_case : Dict = 1
snake_case : Optional[Any] = self._get_uniform_logits(A , A )
snake_case : List[str] = logits_processor(A , A , cur_len=A )
self.assertTrue(jnp.isneginf(scores[:, bos_token_id + 1 :] ).all() )
self.assertListEqual(scores[:, bos_token_id].tolist() , 4 * [0] ) # score for bos_token_id should be zero
# check that bos_token_id is not forced if current length is greater than 1
snake_case : Tuple = 3
snake_case : Optional[Any] = self._get_uniform_logits(A , A )
snake_case : List[str] = logits_processor(A , A , cur_len=A )
self.assertFalse(jnp.isinf(A ).any() )
def UpperCAmelCase ( self ) -> Optional[int]:
snake_case : Optional[Any] = 2_0
snake_case : Union[str, Any] = 4
snake_case : Union[str, Any] = 0
snake_case : Tuple = 5
snake_case : List[Any] = FlaxForcedEOSTokenLogitsProcessor(max_length=A , eos_token_id=A )
# check that all scores are -inf except the eos_token_id when max_length is reached
snake_case : Union[str, Any] = ids_tensor((batch_size, 4) , vocab_size=2_0 )
snake_case : Optional[int] = 4
snake_case : List[str] = self._get_uniform_logits(A , A )
snake_case : Tuple = logits_processor(A , A , cur_len=A )
self.assertTrue(jnp.isneginf(scores[:, eos_token_id + 1 :] ).all() )
self.assertListEqual(scores[:, eos_token_id].tolist() , 4 * [0] ) # score for eos_token_id should be zero
# check that eos_token_id is not forced if max_length is not reached
snake_case : Optional[int] = 3
snake_case : int = self._get_uniform_logits(A , A )
snake_case : Tuple = logits_processor(A , A , cur_len=A )
self.assertFalse(jnp.isinf(A ).any() )
def UpperCAmelCase ( self ) -> str:
snake_case : Tuple = 4
snake_case : Union[str, Any] = 1_0
snake_case : Optional[int] = 1_5
snake_case : List[str] = 2
snake_case : Optional[Any] = 1
snake_case : Tuple = 1_5
# dummy input_ids and scores
snake_case : List[str] = ids_tensor((batch_size, sequence_length) , A )
snake_case : int = input_ids.copy()
snake_case : str = self._get_uniform_logits(A , A )
snake_case : List[Any] = scores.copy()
# instantiate all dist processors
snake_case : Dict = FlaxTemperatureLogitsWarper(temperature=0.5 )
snake_case : List[Any] = FlaxTopKLogitsWarper(3 )
snake_case : Dict = FlaxTopPLogitsWarper(0.8 )
# instantiate all logits processors
snake_case : int = FlaxMinLengthLogitsProcessor(min_length=1_0 , eos_token_id=A )
snake_case : Union[str, Any] = FlaxForcedBOSTokenLogitsProcessor(bos_token_id=A )
snake_case : Optional[int] = FlaxForcedEOSTokenLogitsProcessor(max_length=A , eos_token_id=A )
snake_case : int = 1_0
# no processor list
snake_case : Tuple = temp_dist_warp(A , A , cur_len=A )
snake_case : str = top_k_warp(A , A , cur_len=A )
snake_case : Optional[int] = top_p_warp(A , A , cur_len=A )
snake_case : Tuple = min_dist_proc(A , A , cur_len=A )
snake_case : Any = bos_dist_proc(A , A , cur_len=A )
snake_case : str = eos_dist_proc(A , A , cur_len=A )
# with processor list
snake_case : Tuple = FlaxLogitsProcessorList(
[temp_dist_warp, top_k_warp, top_p_warp, min_dist_proc, bos_dist_proc, eos_dist_proc] )
snake_case : Optional[Any] = processor(A , A , cur_len=A )
# scores should be equal
self.assertTrue(jnp.allclose(A , A , atol=1e-3 ) )
# input_ids should never be changed
self.assertListEqual(input_ids.tolist() , input_ids_comp.tolist() )
def UpperCAmelCase ( self ) -> Tuple:
snake_case : Optional[Any] = 4
snake_case : List[str] = 1_0
snake_case : Union[str, Any] = 1_5
snake_case : Dict = 2
snake_case : int = 1
snake_case : Optional[Any] = 1_5
# dummy input_ids and scores
snake_case : List[Any] = ids_tensor((batch_size, sequence_length) , A )
snake_case : Tuple = input_ids.copy()
snake_case : int = self._get_uniform_logits(A , A )
snake_case : List[str] = scores.copy()
# instantiate all dist processors
snake_case : List[Any] = FlaxTemperatureLogitsWarper(temperature=0.5 )
snake_case : Tuple = FlaxTopKLogitsWarper(3 )
snake_case : Optional[Any] = FlaxTopPLogitsWarper(0.8 )
# instantiate all logits processors
snake_case : int = FlaxMinLengthLogitsProcessor(min_length=1_0 , eos_token_id=A )
snake_case : List[Any] = FlaxForcedBOSTokenLogitsProcessor(bos_token_id=A )
snake_case : str = FlaxForcedEOSTokenLogitsProcessor(max_length=A , eos_token_id=A )
snake_case : int = 1_0
# no processor list
def run_no_processor_list(A , A , A ):
snake_case : Optional[Any] = temp_dist_warp(A , A , cur_len=A )
snake_case : List[str] = top_k_warp(A , A , cur_len=A )
snake_case : Tuple = top_p_warp(A , A , cur_len=A )
snake_case : List[Any] = min_dist_proc(A , A , cur_len=A )
snake_case : Union[str, Any] = bos_dist_proc(A , A , cur_len=A )
snake_case : List[Any] = eos_dist_proc(A , A , cur_len=A )
return scores
# with processor list
def run_processor_list(A , A , A ):
snake_case : Optional[int] = FlaxLogitsProcessorList(
[temp_dist_warp, top_k_warp, top_p_warp, min_dist_proc, bos_dist_proc, eos_dist_proc] )
snake_case : int = processor(A , A , cur_len=A )
return scores
snake_case : Dict = jax.jit(A )
snake_case : Union[str, Any] = jax.jit(A )
snake_case : Any = jitted_run_no_processor_list(A , A , A )
snake_case : str = jitted_run_processor_list(A , A , A )
# scores should be equal
self.assertTrue(jnp.allclose(A , A , atol=1e-3 ) )
# input_ids should never be changed
self.assertListEqual(input_ids.tolist() , input_ids_comp.tolist() )
| 124 | """simple docstring"""
import os
from typing import List, Optional, Union
from ...image_processing_utils import BatchFeature
from ...image_utils import ImageInput
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
from ..auto import AutoTokenizer
class InstructBlipProcessor(ProcessorMixin):
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "BlipImageProcessor"
    tokenizer_class = "AutoTokenizer"

    def __init__(self, image_processor, tokenizer, qformer_tokenizer):
        super().__init__(image_processor, tokenizer)
        # add QFormer tokenizer
        self.qformer_tokenizer = qformer_tokenizer

    def __call__(self, images: ImageInput = None, text: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None, add_special_tokens: bool = True, padding: Union[bool, str, PaddingStrategy] = False, truncation: Union[bool, str, TruncationStrategy] = None, max_length: Optional[int] = None, stride: int = 0, pad_to_multiple_of: Optional[int] = None, return_attention_mask: Optional[bool] = None, return_overflowing_tokens: bool = False, return_special_tokens_mask: bool = False, return_offsets_mapping: bool = False, return_token_type_ids: bool = False, return_length: bool = False, verbose: bool = True, return_tensors: Optional[Union[str, TensorType]] = None, **kwargs) -> BatchFeature:
        if images is None and text is None:
            raise ValueError("You have to specify at least images or text.")
        encoding = BatchFeature()
        if text is not None:
            text_encoding = self.tokenizer(text=text, add_special_tokens=add_special_tokens, padding=padding, truncation=truncation, max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of, return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping, return_token_type_ids=return_token_type_ids, return_length=return_length, verbose=verbose, return_tensors=return_tensors, **kwargs)
            encoding.update(text_encoding)
            qformer_text_encoding = self.qformer_tokenizer(text=text, add_special_tokens=add_special_tokens, padding=padding, truncation=truncation, max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of, return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping, return_token_type_ids=return_token_type_ids, return_length=return_length, verbose=verbose, return_tensors=return_tensors, **kwargs)
            encoding["qformer_input_ids"] = qformer_text_encoding.pop("input_ids")
            encoding["qformer_attention_mask"] = qformer_text_encoding.pop("attention_mask")
        if images is not None:
            image_encoding = self.image_processor(images, return_tensors=return_tensors)
            encoding.update(image_encoding)
        return encoding

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @property
    # Copied from transformers.models.blip.processing_blip.BlipProcessor.model_input_names
    def model_input_names(self):
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))

    def save_pretrained(self, save_directory, **kwargs):
        if os.path.isfile(save_directory):
            raise ValueError(f"Provided path ({save_directory}) should be a directory, not a file")
        os.makedirs(save_directory, exist_ok=True)
        qformer_tokenizer_path = os.path.join(save_directory, "qformer_tokenizer")
        self.qformer_tokenizer.save_pretrained(qformer_tokenizer_path)
        return super().save_pretrained(save_directory, **kwargs)

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs):
        qformer_tokenizer = AutoTokenizer.from_pretrained(pretrained_model_name_or_path, subfolder="qformer_tokenizer")
        args = cls._get_arguments_from_pretrained(pretrained_model_name_or_path, **kwargs)
        args.append(qformer_tokenizer)
        return cls(*args)
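# Illustrative usage sketch (checkpoint id is an example; `image` stands for a
# PIL image loaded elsewhere):
#   processor = InstructBlipProcessor.from_pretrained("Salesforce/instructblip-vicuna-7b")
#   inputs = processor(images=image, text="What is shown here?", return_tensors="pt")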
| 77 | 0 |
import inspect
import unittest
from transformers import SegformerConfig, is_torch_available, is_vision_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_MAPPING,
SegformerForImageClassification,
SegformerForSemanticSegmentation,
SegformerModel,
)
from transformers.models.segformer.modeling_segformer import SEGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import SegformerImageProcessor
class SCREAMING_SNAKE_CASE__ ( UpperCamelCase__ ):
def UpperCamelCase ( self ):
A__ = self.config_class(**self.inputs_dict )
self.parent.assertTrue(hasattr(__lowerCamelCase,'''hidden_sizes''' ) )
self.parent.assertTrue(hasattr(__lowerCamelCase,'''num_attention_heads''' ) )
self.parent.assertTrue(hasattr(__lowerCamelCase,'''num_encoder_blocks''' ) )
class SCREAMING_SNAKE_CASE__ :
def __init__( self,__lowerCamelCase,__lowerCamelCase=13,__lowerCamelCase=64,__lowerCamelCase=3,__lowerCamelCase=4,__lowerCamelCase=[2, 2, 2, 2],__lowerCamelCase=[8, 4, 2, 1],__lowerCamelCase=[16, 32, 64, 128],__lowerCamelCase=[1, 4, 8, 16],__lowerCamelCase=[1, 2, 4, 8],__lowerCamelCase=True,__lowerCamelCase=True,__lowerCamelCase="gelu",__lowerCamelCase=0.1,__lowerCamelCase=0.1,__lowerCamelCase=0.02,__lowerCamelCase=3,__lowerCamelCase=None,):
A__ = parent
A__ = batch_size
A__ = image_size
A__ = num_channels
A__ = num_encoder_blocks
A__ = sr_ratios
A__ = depths
A__ = hidden_sizes
A__ = downsampling_rates
A__ = num_attention_heads
A__ = is_training
A__ = use_labels
A__ = hidden_act
A__ = hidden_dropout_prob
A__ = attention_probs_dropout_prob
A__ = initializer_range
A__ = num_labels
A__ = scope
def UpperCamelCase ( self ):
A__ = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
A__ = None
if self.use_labels:
A__ = ids_tensor([self.batch_size, self.image_size, self.image_size],self.num_labels )
A__ = self.get_config()
return config, pixel_values, labels
def UpperCamelCase ( self ):
return SegformerConfig(
image_size=self.image_size,num_channels=self.num_channels,num_encoder_blocks=self.num_encoder_blocks,depths=self.depths,hidden_sizes=self.hidden_sizes,num_attention_heads=self.num_attention_heads,hidden_act=self.hidden_act,hidden_dropout_prob=self.hidden_dropout_prob,attention_probs_dropout_prob=self.attention_probs_dropout_prob,initializer_range=self.initializer_range,)
def UpperCamelCase ( self,__lowerCamelCase,__lowerCamelCase,__lowerCamelCase ):
A__ = SegformerModel(config=__lowerCamelCase )
model.to(__lowerCamelCase )
model.eval()
A__ = model(__lowerCamelCase )
A__ = A__ = self.image_size // (self.downsampling_rates[-1] * 2)
self.parent.assertEqual(
result.last_hidden_state.shape,(self.batch_size, self.hidden_sizes[-1], expected_height, expected_width) )
def UpperCamelCase ( self,__lowerCamelCase,__lowerCamelCase,__lowerCamelCase ):
A__ = self.num_labels
A__ = SegformerForSemanticSegmentation(__lowerCamelCase )
model.to(__lowerCamelCase )
model.eval()
A__ = model(__lowerCamelCase )
self.parent.assertEqual(
result.logits.shape,(self.batch_size, self.num_labels, self.image_size // 4, self.image_size // 4) )
A__ = model(__lowerCamelCase,labels=__lowerCamelCase )
self.parent.assertEqual(
result.logits.shape,(self.batch_size, self.num_labels, self.image_size // 4, self.image_size // 4) )
self.parent.assertGreater(result.loss,0.0 )
def UpperCamelCase ( self,__lowerCamelCase,__lowerCamelCase,__lowerCamelCase ):
A__ = 1
A__ = SegformerForSemanticSegmentation(config=__lowerCamelCase )
model.to(__lowerCamelCase )
model.eval()
A__ = torch.randint(0,1,(self.batch_size, self.image_size, self.image_size) ).to(__lowerCamelCase )
A__ = model(__lowerCamelCase,labels=__lowerCamelCase )
self.parent.assertGreater(result.loss,0.0 )
def UpperCamelCase ( self ):
A__ = self.prepare_config_and_inputs()
A__ , A__ , A__ = config_and_inputs
A__ = {'''pixel_values''': pixel_values}
return config, inputs_dict
@require_torch
class SCREAMING_SNAKE_CASE__ ( UpperCamelCase__ , UpperCamelCase__ , unittest.TestCase ):
__SCREAMING_SNAKE_CASE = (
(
SegformerModel,
SegformerForSemanticSegmentation,
SegformerForImageClassification,
)
if is_torch_available()
else ()
)
__SCREAMING_SNAKE_CASE = (
{
'''feature-extraction''': SegformerModel,
'''image-classification''': SegformerForImageClassification,
'''image-segmentation''': SegformerForSemanticSegmentation,
}
if is_torch_available()
else {}
)
__SCREAMING_SNAKE_CASE = True
__SCREAMING_SNAKE_CASE = False
__SCREAMING_SNAKE_CASE = False
__SCREAMING_SNAKE_CASE = False
def UpperCamelCase ( self ):
A__ = SegformerModelTester(self )
A__ = SegformerConfigTester(self,config_class=__lowerCamelCase )
def UpperCamelCase ( self ):
self.config_tester.run_common_tests()
def UpperCamelCase ( self ):
A__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__lowerCamelCase )
def UpperCamelCase ( self ):
A__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_binary_image_segmentation(*__lowerCamelCase )
def UpperCamelCase ( self ):
A__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_segmentation(*__lowerCamelCase )
@unittest.skip('''SegFormer does not use inputs_embeds''' )
def UpperCamelCase ( self ):
pass
@unittest.skip('''SegFormer does not have get_input_embeddings method and get_output_embeddings methods''' )
def UpperCamelCase ( self ):
pass
def UpperCamelCase ( self ):
A__ , A__ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
A__ = model_class(__lowerCamelCase )
A__ = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
A__ = [*signature.parameters.keys()]
A__ = ['''pixel_values''']
self.assertListEqual(arg_names[:1],__lowerCamelCase )
def UpperCamelCase ( self ):
A__ , A__ = self.model_tester.prepare_config_and_inputs_for_common()
A__ = True
for model_class in self.all_model_classes:
A__ = True
A__ = False
A__ = True
A__ = model_class(__lowerCamelCase )
model.to(__lowerCamelCase )
model.eval()
with torch.no_grad():
A__ = model(**self._prepare_for_class(__lowerCamelCase,__lowerCamelCase ) )
A__ = outputs.attentions
A__ = sum(self.model_tester.depths )
self.assertEqual(len(__lowerCamelCase ),__lowerCamelCase )
# check that output_attentions also work using config
del inputs_dict["output_attentions"]
A__ = True
A__ = model_class(__lowerCamelCase )
model.to(__lowerCamelCase )
model.eval()
with torch.no_grad():
A__ = model(**self._prepare_for_class(__lowerCamelCase,__lowerCamelCase ) )
A__ = outputs.attentions
self.assertEqual(len(__lowerCamelCase ),__lowerCamelCase )
# verify the first attentions (first block, first layer)
A__ = (self.model_tester.image_size // 4) ** 2
A__ = (self.model_tester.image_size // (4 * self.model_tester.sr_ratios[0])) ** 2
self.assertListEqual(
list(attentions[0].shape[-3:] ),[self.model_tester.num_attention_heads[0], expected_seq_len, expected_reduced_seq_len],)
# verify the last attentions (last block, last layer)
A__ = (self.model_tester.image_size // 32) ** 2
A__ = (self.model_tester.image_size // (32 * self.model_tester.sr_ratios[-1])) ** 2
self.assertListEqual(
list(attentions[-1].shape[-3:] ),[self.model_tester.num_attention_heads[-1], expected_seq_len, expected_reduced_seq_len],)
A__ = len(__lowerCamelCase )
# Check attention is always last and order is fine
A__ = True
A__ = True
A__ = model_class(__lowerCamelCase )
model.to(__lowerCamelCase )
model.eval()
with torch.no_grad():
A__ = model(**self._prepare_for_class(__lowerCamelCase,__lowerCamelCase ) )
self.assertEqual(out_len + 1,len(__lowerCamelCase ) )
A__ = outputs.attentions
self.assertEqual(len(__lowerCamelCase ),__lowerCamelCase )
# verify the first attentions (first block, first layer)
A__ = (self.model_tester.image_size // 4) ** 2
A__ = (self.model_tester.image_size // (4 * self.model_tester.sr_ratios[0])) ** 2
self.assertListEqual(
list(self_attentions[0].shape[-3:] ),[self.model_tester.num_attention_heads[0], expected_seq_len, expected_reduced_seq_len],)
def UpperCamelCase ( self ):
def check_hidden_states_output(__lowerCamelCase,__lowerCamelCase,__lowerCamelCase ):
A__ = model_class(__lowerCamelCase )
model.to(__lowerCamelCase )
model.eval()
with torch.no_grad():
A__ = model(**self._prepare_for_class(__lowerCamelCase,__lowerCamelCase ) )
A__ = outputs.hidden_states
A__ = self.model_tester.num_encoder_blocks
self.assertEqual(len(__lowerCamelCase ),__lowerCamelCase )
# verify the first hidden states (first block)
self.assertListEqual(
list(hidden_states[0].shape[-3:] ),[
self.model_tester.hidden_sizes[0],
self.model_tester.image_size // 4,
self.model_tester.image_size // 4,
],)
A__ , A__ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
A__ = True
check_hidden_states_output(__lowerCamelCase,__lowerCamelCase,__lowerCamelCase )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
A__ = True
check_hidden_states_output(__lowerCamelCase,__lowerCamelCase,__lowerCamelCase )
def UpperCamelCase ( self ):
if not self.model_tester.is_training:
return
A__ , A__ = self.model_tester.prepare_config_and_inputs_for_common()
A__ = True
for model_class in self.all_model_classes:
if model_class in get_values(__lowerCamelCase ):
continue
A__ = model_class(__lowerCamelCase )
model.to(__lowerCamelCase )
model.train()
A__ = self._prepare_for_class(__lowerCamelCase,__lowerCamelCase,return_labels=__lowerCamelCase )
A__ = model(**__lowerCamelCase ).loss
loss.backward()
@unittest.skip('''Will be fixed soon by reducing the size of the model used for common tests.''' )
def UpperCamelCase ( self ):
pass
@slow
def UpperCamelCase ( self ):
for model_name in SEGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
A__ = SegformerModel.from_pretrained(__lowerCamelCase )
self.assertIsNotNone(__lowerCamelCase )
def UpperCamelCase__( )->Optional[Any]:
A__ = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
return image
@require_torch
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
@slow
def UpperCamelCase ( self ):
# only resize + normalize
A__ = SegformerImageProcessor(
image_scale=(512, 512),keep_ratio=__lowerCamelCase,align=__lowerCamelCase,do_random_crop=__lowerCamelCase )
A__ = SegformerForSemanticSegmentation.from_pretrained('''nvidia/segformer-b0-finetuned-ade-512-512''' ).to(
__lowerCamelCase )
A__ = prepare_img()
A__ = image_processor(images=__lowerCamelCase,return_tensors='''pt''' )
A__ = encoded_inputs.pixel_values.to(__lowerCamelCase )
with torch.no_grad():
A__ = model(__lowerCamelCase )
A__ = torch.Size((1, model.config.num_labels, 128, 128) )
self.assertEqual(outputs.logits.shape,__lowerCamelCase )
A__ = torch.tensor(
[
[[-4.6310, -5.5232, -6.2356], [-5.1921, -6.1444, -6.5996], [-5.4424, -6.2790, -6.7574]],
[[-12.1391, -13.3122, -13.9554], [-12.8732, -13.9352, -14.3563], [-12.9438, -13.8226, -14.2513]],
[[-12.5134, -13.4686, -14.4915], [-12.8669, -14.4343, -14.7758], [-13.2523, -14.5819, -15.0694]],
] ).to(__lowerCamelCase )
self.assertTrue(torch.allclose(outputs.logits[0, :3, :3, :3],__lowerCamelCase,atol=1E-4 ) )
@slow
def UpperCamelCase ( self ):
# only resize + normalize
A__ = SegformerImageProcessor(
image_scale=(512, 512),keep_ratio=__lowerCamelCase,align=__lowerCamelCase,do_random_crop=__lowerCamelCase )
A__ = SegformerForSemanticSegmentation.from_pretrained(
'''nvidia/segformer-b1-finetuned-cityscapes-1024-1024''' ).to(__lowerCamelCase )
A__ = prepare_img()
A__ = image_processor(images=__lowerCamelCase,return_tensors='''pt''' )
A__ = encoded_inputs.pixel_values.to(__lowerCamelCase )
with torch.no_grad():
A__ = model(__lowerCamelCase )
A__ = torch.Size((1, model.config.num_labels, 128, 128) )
self.assertEqual(outputs.logits.shape,__lowerCamelCase )
A__ = torch.tensor(
[
[[-13.5748, -13.9111, -12.6500], [-14.3500, -15.3683, -14.2328], [-14.7532, -16.0424, -15.6087]],
[[-17.1651, -15.8725, -12.9653], [-17.2580, -17.3718, -14.8223], [-16.6058, -16.8783, -16.7452]],
[[-3.6456, -3.0209, -1.4203], [-3.0797, -3.1959, -2.0000], [-1.8757, -1.9217, -1.6997]],
] ).to(__lowerCamelCase )
self.assertTrue(torch.allclose(outputs.logits[0, :3, :3, :3],__lowerCamelCase,atol=1E-1 ) )
@slow
def UpperCamelCase ( self ):
# only resize + normalize
A__ = SegformerImageProcessor(
image_scale=(512, 512),keep_ratio=__lowerCamelCase,align=__lowerCamelCase,do_random_crop=__lowerCamelCase )
A__ = SegformerForSemanticSegmentation.from_pretrained('''nvidia/segformer-b0-finetuned-ade-512-512''' ).to(
__lowerCamelCase )
A__ = prepare_img()
A__ = image_processor(images=__lowerCamelCase,return_tensors='''pt''' )
A__ = encoded_inputs.pixel_values.to(__lowerCamelCase )
with torch.no_grad():
A__ = model(__lowerCamelCase )
A__ = outputs.logits.detach().cpu()
A__ = image_processor.post_process_semantic_segmentation(outputs=__lowerCamelCase,target_sizes=[(500, 300)] )
A__ = torch.Size((500, 300) )
self.assertEqual(segmentation[0].shape,__lowerCamelCase )
A__ = image_processor.post_process_semantic_segmentation(outputs=__lowerCamelCase )
A__ = torch.Size((128, 128) )
self.assertEqual(segmentation[0].shape,__lowerCamelCase )
| 39 |
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

UNISPEECH_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/unispeech-large-1500h-cv": (
        "https://huggingface.co/microsoft/unispeech-large-1500h-cv/resolve/main/config.json"
    ),
    # See all UniSpeech models at https://huggingface.co/models?filter=unispeech
}
class UniSpeechConfig(PretrainedConfig):
    model_type = "unispeech"

    def __init__(self, vocab_size=32, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_act="gelu", hidden_dropout=0.1, activation_dropout=0.1, attention_dropout=0.1, feat_proj_dropout=0.0, feat_quantizer_dropout=0.0, final_dropout=0.1, layerdrop=0.1, initializer_range=0.02, layer_norm_eps=1e-5, feat_extract_norm="group", feat_extract_activation="gelu", conv_dim=(512, 512, 512, 512, 512, 512, 512), conv_stride=(5, 2, 2, 2, 2, 2, 2), conv_kernel=(10, 3, 3, 3, 3, 2, 2), conv_bias=False, num_conv_pos_embeddings=128, num_conv_pos_embedding_groups=16, do_stable_layer_norm=False, apply_spec_augment=True, mask_time_prob=0.05, mask_time_length=10, mask_time_min_masks=2, mask_feature_prob=0.0, mask_feature_length=10, mask_feature_min_masks=0, num_codevectors_per_group=320, num_codevector_groups=2, contrastive_logits_temperature=0.1, num_negatives=100, codevector_dim=256, proj_codevector_dim=256, diversity_loss_weight=0.1, ctc_loss_reduction="mean", ctc_zero_infinity=False, use_weighted_layer_sum=False, classifier_proj_size=256, num_ctc_classes=80, pad_token_id=0, bos_token_id=1, eos_token_id=2, replace_prob=0.5, **kwargs):
        super().__init__(**kwargs, pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id)
        self.hidden_size = hidden_size
        self.feat_extract_norm = feat_extract_norm
        self.feat_extract_activation = feat_extract_activation
        self.conv_dim = list(conv_dim)
        self.conv_stride = list(conv_stride)
        self.conv_kernel = list(conv_kernel)
        self.conv_bias = conv_bias
        self.num_conv_pos_embeddings = num_conv_pos_embeddings
        self.num_conv_pos_embedding_groups = num_conv_pos_embedding_groups
        self.num_feat_extract_layers = len(self.conv_dim)
        self.num_hidden_layers = num_hidden_layers
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.num_attention_heads = num_attention_heads
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.feat_proj_dropout = feat_proj_dropout
        self.final_dropout = final_dropout
        self.layerdrop = layerdrop
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.num_ctc_classes = num_ctc_classes
        self.vocab_size = vocab_size
        self.do_stable_layer_norm = do_stable_layer_norm
        self.use_weighted_layer_sum = use_weighted_layer_sum
        self.classifier_proj_size = classifier_proj_size

        if (
            (len(self.conv_stride) != self.num_feat_extract_layers)
            or (len(self.conv_kernel) != self.num_feat_extract_layers)
            or (len(self.conv_dim) != self.num_feat_extract_layers)
        ):
            raise ValueError(
                "Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =="
                " `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ="
                f" {len(self.conv_dim)}`, `len(config.conv_stride) = {len(self.conv_stride)}`,"
                f" `len(config.conv_kernel) = {len(self.conv_kernel)}`."
            )

        # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
        self.apply_spec_augment = apply_spec_augment
        self.mask_time_prob = mask_time_prob
        self.mask_time_length = mask_time_length
        self.mask_time_min_masks = mask_time_min_masks
        self.mask_feature_prob = mask_feature_prob
        self.mask_feature_length = mask_feature_length
        self.mask_feature_min_masks = mask_feature_min_masks

        # parameters for pretraining with codevector quantized representations
        self.num_codevectors_per_group = num_codevectors_per_group
        self.num_codevector_groups = num_codevector_groups
        self.contrastive_logits_temperature = contrastive_logits_temperature
        self.feat_quantizer_dropout = feat_quantizer_dropout
        self.num_negatives = num_negatives
        self.codevector_dim = codevector_dim
        self.proj_codevector_dim = proj_codevector_dim
        self.diversity_loss_weight = diversity_loss_weight

        # ctc loss
        self.ctc_loss_reduction = ctc_loss_reduction
        self.ctc_zero_infinity = ctc_zero_infinity

        # pretraining loss
        self.replace_prob = replace_prob

    @property
    def inputs_to_logits_ratio(self):
        return functools.reduce(operator.mul, self.conv_stride, 1)
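# Illustrative (not in the original file): the default conv strides multiply to
# the feature extractor's total downsampling factor, 5 * 2**6 = 320.
#   >>> UniSpeechConfig().inputs_to_logits_ratio
#   320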
| 39 | 1 |
'''simple docstring'''
from decimal import Decimal, getcontext
from math import ceil, factorial
def pi(precision: int) -> str:
    """Compute pi to `precision` significant digits with the Chudnovsky algorithm."""
    if not isinstance(precision, int):
        raise TypeError("Undefined for non-integers")
    elif precision < 1:
        raise ValueError("Undefined for non-natural numbers")

    getcontext().prec = precision
    num_iterations = ceil(precision / 14)
    constant_term = 426880 * Decimal(10005).sqrt()
    exponential_term = 1
    linear_term = 13591409
    partial_sum = Decimal(linear_term)
    for k in range(1, num_iterations):
        multinomial_term = factorial(6 * k) // (factorial(3 * k) * factorial(k) ** 3)
        linear_term += 545140134
        exponential_term *= -262537412640768000
        partial_sum += Decimal(multinomial_term * linear_term) / exponential_term
    # drop the last digit, which may be affected by rounding
    return str(constant_term / partial_sum)[:-1]
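# Illustrative check (each series term contributes roughly 14 digits):
#   >>> pi(10)
#   '3.14159265'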
if __name__ == "__main__":
    n = 50
    print(f"The first {n} digits of pi are: {pi(n)}")
| 3 |
'''simple docstring'''
import json
import os
import sys
import tempfile
import unittest
from pathlib import Path
from shutil import copyfile
from huggingface_hub import HfFolder, Repository, create_repo, delete_repo
from requests.exceptions import HTTPError
import transformers
from transformers import (
CONFIG_MAPPING,
FEATURE_EXTRACTOR_MAPPING,
PROCESSOR_MAPPING,
TOKENIZER_MAPPING,
AutoConfig,
AutoFeatureExtractor,
AutoProcessor,
AutoTokenizer,
BertTokenizer,
ProcessorMixin,
WavaVecaConfig,
WavaVecaFeatureExtractor,
WavaVecaProcessor,
)
from transformers.testing_utils import TOKEN, USER, get_tests_dir, is_staging_test
from transformers.tokenization_utils import TOKENIZER_CONFIG_FILE
from transformers.utils import FEATURE_EXTRACTOR_NAME, is_tokenizers_available
sys.path.append(str(Path(__file__).parent.parent.parent.parent / 'utils'))
from test_module.custom_configuration import CustomConfig # noqa E402
from test_module.custom_feature_extraction import CustomFeatureExtractor # noqa E402
from test_module.custom_processing import CustomProcessor # noqa E402
from test_module.custom_tokenization import CustomTokenizer # noqa E402
lowercase : str = get_tests_dir('fixtures/dummy_feature_extractor_config.json')
lowercase : str = get_tests_dir('fixtures/vocab.json')
lowercase : int = get_tests_dir('fixtures')
class A ( unittest.TestCase ):
__magic_name__ = ['''[UNK]''', '''[CLS]''', '''[SEP]''', '''[PAD]''', '''[MASK]''', '''bla''', '''blou''']
def __lowerCAmelCase ( self ) -> List[str]:
"""simple docstring"""
A : Tuple = 0
def __lowerCAmelCase ( self ) -> Optional[int]:
"""simple docstring"""
A : List[Any] = AutoProcessor.from_pretrained('''facebook/wav2vec2-base-960h''' )
self.assertIsInstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
def __lowerCAmelCase ( self ) -> List[str]:
"""simple docstring"""
with tempfile.TemporaryDirectory() as tmpdirname:
A : Union[str, Any] = WavaVecaConfig()
A : List[str] = AutoProcessor.from_pretrained('''facebook/wav2vec2-base-960h''' )
# save in new folder
model_config.save_pretrained(SCREAMING_SNAKE_CASE )
processor.save_pretrained(SCREAMING_SNAKE_CASE )
A : List[str] = AutoProcessor.from_pretrained(SCREAMING_SNAKE_CASE )
self.assertIsInstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
def __lowerCAmelCase ( self ) -> int:
"""simple docstring"""
with tempfile.TemporaryDirectory() as tmpdirname:
# copy relevant files
copyfile(SCREAMING_SNAKE_CASE , os.path.join(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) )
copyfile(SCREAMING_SNAKE_CASE , os.path.join(SCREAMING_SNAKE_CASE , '''vocab.json''' ) )
A : Optional[Any] = AutoProcessor.from_pretrained(SCREAMING_SNAKE_CASE )
self.assertIsInstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
def __lowerCAmelCase ( self ) -> Union[str, Any]:
"""simple docstring"""
with tempfile.TemporaryDirectory() as tmpdirname:
A : Dict = WavaVecaFeatureExtractor()
A : List[str] = AutoTokenizer.from_pretrained('''facebook/wav2vec2-base-960h''' )
A : str = WavaVecaProcessor(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
# save in new folder
processor.save_pretrained(SCREAMING_SNAKE_CASE )
# drop `processor_class` in tokenizer
with open(os.path.join(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) , '''r''' ) as f:
A : Dict = json.load(SCREAMING_SNAKE_CASE )
config_dict.pop('''processor_class''' )
with open(os.path.join(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) , '''w''' ) as f:
f.write(json.dumps(SCREAMING_SNAKE_CASE ) )
A : Optional[Any] = AutoProcessor.from_pretrained(SCREAMING_SNAKE_CASE )
self.assertIsInstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
def __lowerCAmelCase ( self ) -> int:
"""simple docstring"""
with tempfile.TemporaryDirectory() as tmpdirname:
A : List[Any] = WavaVecaFeatureExtractor()
A : List[Any] = AutoTokenizer.from_pretrained('''facebook/wav2vec2-base-960h''' )
A : str = WavaVecaProcessor(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
# save in new folder
processor.save_pretrained(SCREAMING_SNAKE_CASE )
# drop `processor_class` in feature extractor
with open(os.path.join(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) , '''r''' ) as f:
A : str = json.load(SCREAMING_SNAKE_CASE )
config_dict.pop('''processor_class''' )
with open(os.path.join(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) , '''w''' ) as f:
f.write(json.dumps(SCREAMING_SNAKE_CASE ) )
A : str = AutoProcessor.from_pretrained(SCREAMING_SNAKE_CASE )
self.assertIsInstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
def __lowerCAmelCase ( self ) -> Optional[Any]:
"""simple docstring"""
with tempfile.TemporaryDirectory() as tmpdirname:
A : str = WavaVecaConfig(processor_class='''Wav2Vec2Processor''' )
model_config.save_pretrained(SCREAMING_SNAKE_CASE )
# copy relevant files
copyfile(SCREAMING_SNAKE_CASE , os.path.join(SCREAMING_SNAKE_CASE , '''vocab.json''' ) )
            # create empty sample processor
with open(os.path.join(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) , '''w''' ) as f:
f.write('''{}''' )
A : List[str] = AutoProcessor.from_pretrained(SCREAMING_SNAKE_CASE )
self.assertIsInstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
def __lowerCAmelCase ( self ) -> Any:
"""simple docstring"""
with self.assertRaises(SCREAMING_SNAKE_CASE ):
A : Optional[Any] = AutoProcessor.from_pretrained('''hf-internal-testing/test_dynamic_processor''' )
# If remote code is disabled, we can't load this config.
with self.assertRaises(SCREAMING_SNAKE_CASE ):
A : Union[str, Any] = AutoProcessor.from_pretrained(
'''hf-internal-testing/test_dynamic_processor''' , trust_remote_code=SCREAMING_SNAKE_CASE )
A : Union[str, Any] = AutoProcessor.from_pretrained('''hf-internal-testing/test_dynamic_processor''' , trust_remote_code=SCREAMING_SNAKE_CASE )
self.assertTrue(processor.special_attribute_present )
self.assertEqual(processor.__class__.__name__ , '''NewProcessor''' )
A : List[str] = processor.feature_extractor
self.assertTrue(feature_extractor.special_attribute_present )
self.assertEqual(feature_extractor.__class__.__name__ , '''NewFeatureExtractor''' )
A : Tuple = processor.tokenizer
self.assertTrue(tokenizer.special_attribute_present )
if is_tokenizers_available():
self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizerFast''' )
# Test we can also load the slow version
A : List[str] = AutoProcessor.from_pretrained(
'''hf-internal-testing/test_dynamic_processor''' , trust_remote_code=SCREAMING_SNAKE_CASE , use_fast=SCREAMING_SNAKE_CASE )
A : int = new_processor.tokenizer
self.assertTrue(new_tokenizer.special_attribute_present )
self.assertEqual(new_tokenizer.__class__.__name__ , '''NewTokenizer''' )
else:
self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizer''' )
def __lowerCAmelCase ( self ) -> Optional[Any]:
"""simple docstring"""
try:
AutoConfig.register('''custom''' , SCREAMING_SNAKE_CASE )
AutoFeatureExtractor.register(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
AutoTokenizer.register(SCREAMING_SNAKE_CASE , slow_tokenizer_class=SCREAMING_SNAKE_CASE )
AutoProcessor.register(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
# Trying to register something existing in the Transformers library will raise an error
with self.assertRaises(SCREAMING_SNAKE_CASE ):
AutoProcessor.register(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
# Now that the config is registered, it can be used as any other config with the auto-API
A : List[Any] = CustomFeatureExtractor.from_pretrained(SCREAMING_SNAKE_CASE )
with tempfile.TemporaryDirectory() as tmp_dir:
A : Tuple = os.path.join(SCREAMING_SNAKE_CASE , '''vocab.txt''' )
with open(SCREAMING_SNAKE_CASE , '''w''' , encoding='''utf-8''' ) as vocab_writer:
vocab_writer.write(''''''.join([x + '''\n''' for x in self.vocab_tokens] ) )
A : Optional[int] = CustomTokenizer(SCREAMING_SNAKE_CASE )
A : Any = CustomProcessor(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
with tempfile.TemporaryDirectory() as tmp_dir:
processor.save_pretrained(SCREAMING_SNAKE_CASE )
A : List[str] = AutoProcessor.from_pretrained(SCREAMING_SNAKE_CASE )
self.assertIsInstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content:
del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig]
if CustomConfig in TOKENIZER_MAPPING._extra_content:
del TOKENIZER_MAPPING._extra_content[CustomConfig]
if CustomConfig in PROCESSOR_MAPPING._extra_content:
del PROCESSOR_MAPPING._extra_content[CustomConfig]
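            # The registries patched by AutoConfig/AutoProcessor.register above are
            # process-global, so they must be cleaned up here to avoid leaking the
            # custom classes into other tests.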
def __lowerCAmelCase ( self ) -> Optional[int]:
"""simple docstring"""
class A ( __snake_case ):
__magic_name__ = False
class A ( __snake_case ):
__magic_name__ = False
class A ( __snake_case ):
__magic_name__ = '''AutoFeatureExtractor'''
__magic_name__ = '''AutoTokenizer'''
__magic_name__ = False
try:
AutoConfig.register('''custom''' , SCREAMING_SNAKE_CASE )
AutoFeatureExtractor.register(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
AutoTokenizer.register(SCREAMING_SNAKE_CASE , slow_tokenizer_class=SCREAMING_SNAKE_CASE )
AutoProcessor.register(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
# If remote code is not set, the default is to use local classes.
A : Union[str, Any] = AutoProcessor.from_pretrained('''hf-internal-testing/test_dynamic_processor''' )
self.assertEqual(processor.__class__.__name__ , '''NewProcessor''' )
self.assertFalse(processor.special_attribute_present )
self.assertFalse(processor.feature_extractor.special_attribute_present )
self.assertFalse(processor.tokenizer.special_attribute_present )
# If remote code is disabled, we load the local ones.
A : Optional[int] = AutoProcessor.from_pretrained(
'''hf-internal-testing/test_dynamic_processor''' , trust_remote_code=SCREAMING_SNAKE_CASE )
self.assertEqual(processor.__class__.__name__ , '''NewProcessor''' )
self.assertFalse(processor.special_attribute_present )
self.assertFalse(processor.feature_extractor.special_attribute_present )
self.assertFalse(processor.tokenizer.special_attribute_present )
# If remote is enabled, we load from the Hub.
A : Tuple = AutoProcessor.from_pretrained(
'''hf-internal-testing/test_dynamic_processor''' , trust_remote_code=SCREAMING_SNAKE_CASE )
self.assertEqual(processor.__class__.__name__ , '''NewProcessor''' )
self.assertTrue(processor.special_attribute_present )
self.assertTrue(processor.feature_extractor.special_attribute_present )
self.assertTrue(processor.tokenizer.special_attribute_present )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content:
del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig]
if CustomConfig in TOKENIZER_MAPPING._extra_content:
del TOKENIZER_MAPPING._extra_content[CustomConfig]
if CustomConfig in PROCESSOR_MAPPING._extra_content:
del PROCESSOR_MAPPING._extra_content[CustomConfig]
def __lowerCAmelCase ( self ) -> str:
"""simple docstring"""
A : int = AutoProcessor.from_pretrained('''hf-internal-testing/tiny-random-bert''' )
self.assertEqual(processor.__class__.__name__ , '''BertTokenizerFast''' )
def __lowerCAmelCase ( self ) -> str:
"""simple docstring"""
A : Optional[int] = AutoProcessor.from_pretrained('''hf-internal-testing/tiny-random-convnext''' )
self.assertEqual(processor.__class__.__name__ , '''ConvNextImageProcessor''' )
@is_staging_test
class A ( unittest.TestCase ):
__magic_name__ = ['''[UNK]''', '''[CLS]''', '''[SEP]''', '''[PAD]''', '''[MASK]''', '''bla''', '''blou''']
@classmethod
def __lowerCAmelCase ( cls ) -> Dict:
"""simple docstring"""
A : Optional[int] = TOKEN
HfFolder.save_token(SCREAMING_SNAKE_CASE )
@classmethod
def __lowerCAmelCase ( cls ) -> Any:
"""simple docstring"""
try:
delete_repo(token=cls._token , repo_id='''test-processor''' )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id='''valid_org/test-processor-org''' )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id='''test-dynamic-processor''' )
except HTTPError:
pass
def __lowerCAmelCase ( self ) -> Dict:
"""simple docstring"""
A : Union[str, Any] = WavaVecaProcessor.from_pretrained(SCREAMING_SNAKE_CASE )
with tempfile.TemporaryDirectory() as tmp_dir:
processor.save_pretrained(
os.path.join(SCREAMING_SNAKE_CASE , '''test-processor''' ) , push_to_hub=SCREAMING_SNAKE_CASE , use_auth_token=self._token )
A : int = WavaVecaProcessor.from_pretrained(F'{USER}/test-processor' )
for k, v in processor.feature_extractor.__dict__.items():
self.assertEqual(SCREAMING_SNAKE_CASE , getattr(new_processor.feature_extractor , SCREAMING_SNAKE_CASE ) )
self.assertDictEqual(new_processor.tokenizer.get_vocab() , processor.tokenizer.get_vocab() )
def __lowerCAmelCase ( self ) -> Optional[int]:
"""simple docstring"""
A : Tuple = WavaVecaProcessor.from_pretrained(SCREAMING_SNAKE_CASE )
with tempfile.TemporaryDirectory() as tmp_dir:
processor.save_pretrained(
os.path.join(SCREAMING_SNAKE_CASE , '''test-processor-org''' ) , push_to_hub=SCREAMING_SNAKE_CASE , use_auth_token=self._token , organization='''valid_org''' , )
A : int = WavaVecaProcessor.from_pretrained('''valid_org/test-processor-org''' )
for k, v in processor.feature_extractor.__dict__.items():
self.assertEqual(SCREAMING_SNAKE_CASE , getattr(new_processor.feature_extractor , SCREAMING_SNAKE_CASE ) )
self.assertDictEqual(new_processor.tokenizer.get_vocab() , processor.tokenizer.get_vocab() )
def __lowerCAmelCase ( self ) -> str:
"""simple docstring"""
CustomFeatureExtractor.register_for_auto_class()
CustomTokenizer.register_for_auto_class()
CustomProcessor.register_for_auto_class()
A : Any = CustomFeatureExtractor.from_pretrained(SCREAMING_SNAKE_CASE )
with tempfile.TemporaryDirectory() as tmp_dir:
A : Union[str, Any] = os.path.join(SCREAMING_SNAKE_CASE , '''vocab.txt''' )
with open(SCREAMING_SNAKE_CASE , '''w''' , encoding='''utf-8''' ) as vocab_writer:
vocab_writer.write(''''''.join([x + '''\n''' for x in self.vocab_tokens] ) )
A : str = CustomTokenizer(SCREAMING_SNAKE_CASE )
A : Union[str, Any] = CustomProcessor(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
with tempfile.TemporaryDirectory() as tmp_dir:
create_repo(F'{USER}/test-dynamic-processor' , token=self._token )
A : List[str] = Repository(SCREAMING_SNAKE_CASE , clone_from=F'{USER}/test-dynamic-processor' , token=self._token )
processor.save_pretrained(SCREAMING_SNAKE_CASE )
# This has added the proper auto_map field to the feature extractor config
self.assertDictEqual(
processor.feature_extractor.auto_map , {
'''AutoFeatureExtractor''': '''custom_feature_extraction.CustomFeatureExtractor''',
'''AutoProcessor''': '''custom_processing.CustomProcessor''',
} , )
# This has added the proper auto_map field to the tokenizer config
with open(os.path.join(SCREAMING_SNAKE_CASE , '''tokenizer_config.json''' ) ) as f:
A : Dict = json.load(SCREAMING_SNAKE_CASE )
self.assertDictEqual(
tokenizer_config['''auto_map'''] , {
'''AutoTokenizer''': ['''custom_tokenization.CustomTokenizer''', None],
'''AutoProcessor''': '''custom_processing.CustomProcessor''',
} , )
# The code has been copied from fixtures
self.assertTrue(os.path.isfile(os.path.join(SCREAMING_SNAKE_CASE , '''custom_feature_extraction.py''' ) ) )
self.assertTrue(os.path.isfile(os.path.join(SCREAMING_SNAKE_CASE , '''custom_tokenization.py''' ) ) )
self.assertTrue(os.path.isfile(os.path.join(SCREAMING_SNAKE_CASE , '''custom_processing.py''' ) ) )
repo.push_to_hub()
A : Optional[int] = AutoProcessor.from_pretrained(F'{USER}/test-dynamic-processor' , trust_remote_code=SCREAMING_SNAKE_CASE )
# Can't make an isinstance check because the new_processor is from the CustomProcessor class of a dynamic module
self.assertEqual(new_processor.__class__.__name__ , '''CustomProcessor''' )
| 3 | 1 |
from __future__ import annotations
import time
import numpy as np
test_claim_vector = [8, 5, 9, 7]
test_allocated_res_table = [
    [2, 0, 1, 1],
    [0, 1, 2, 1],
    [4, 0, 0, 3],
    [0, 2, 1, 0],
    [1, 0, 3, 0],
]
test_maximum_claim_table = [
    [3, 2, 1, 4],
    [0, 2, 5, 2],
    [5, 1, 0, 5],
    [1, 5, 3, 0],
    [3, 0, 3, 3],
]
class BankersAlgorithm:
    def __init__(self, claim_vector, allocated_resources_table, maximum_claim_table):
        self.__claim_vector = claim_vector
        self.__allocated_resources_table = allocated_resources_table
        self.__maximum_claim_table = maximum_claim_table
    def __processes_resource_summation(self):
        # column-wise total of resources currently allocated to all processes
        return [
            sum(p_item[i] for p_item in self.__allocated_resources_table)
            for i in range(len(self.__allocated_resources_table[0]))
        ]
    def __available_resources(self):
        # resources still free = total claim - resources already allocated
        return np.array(self.__claim_vector) - np.array(
            self.__processes_resource_summation())
    def __need(self):
        # per-process outstanding need = maximum claim - current allocation
        return [
            list(np.array(self.__maximum_claim_table[i]) - np.array(allocated_resource))
            for i, allocated_resource in enumerate(self.__allocated_resources_table)
        ]
    def __need_index_manager(self):
        return {self.__need().index(need): need for need in self.__need()}
    def main(self, **kwargs):
        need_list = self.__need()
        alloc_resources_table = self.__allocated_resources_table
        available_resources = self.__available_resources()
        need_index_manager = self.__need_index_manager()
        for kw, val in kwargs.items():
            if kw and val is True:
                self.__pretty_data()
        print('_' * 50 + '\n')
        while need_list:
            safe = False
            for each_need in need_list:
                execution = True
                for index, need in enumerate(each_need):
                    if need > available_resources[index]:
                        execution = False
                        break
                if execution:
                    safe = True
                    # get the original index of the process from ind_ctrl db
                    for original_need_index, need_clone in need_index_manager.items():
                        if each_need == need_clone:
                            process_number = original_need_index
                    print(f'Process {process_number + 1} is executing.')
                    # remove the process run from stack
                    need_list.remove(each_need)
                    # update available/freed resources stack
                    available_resources = np.array(available_resources) + np.array(
                        alloc_resources_table[process_number])
                    print(
                        'Updated available resource stack for processes: '
                        + ' '.join([str(x) for x in available_resources]))
                    break
            if safe:
                print('The process is in a safe state.\n')
            else:
                print('System in unsafe state. Aborting...\n')
                break
    def __pretty_data(self):
        print(' ' * 9 + 'Allocated Resource Table')
        for item in self.__allocated_resources_table:
            print(
                f'P{self.__allocated_resources_table.index(item) + 1}'
                + ' '.join(f'{it:>8}' for it in item)
                + '\n')
        print(' ' * 9 + 'System Resource Table')
        for item in self.__maximum_claim_table:
            print(
                f'P{self.__maximum_claim_table.index(item) + 1}'
                + ' '.join(f'{it:>8}' for it in item)
                + '\n')
        print(
            'Current Usage by Active Processes: '
            + ' '.join(str(x) for x in self.__claim_vector))
        print(
            'Initial Available Resources: '
            + ' '.join(str(x) for x in self.__available_resources()))
        time.sleep(1)
if __name__ == "__main__":
import doctest
doctest.testmod()
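    # Example run with the module-level test data (any truthy keyword argument,
    # e.g. describe=True, triggers the pretty-printed tables first):
    # BankersAlgorithm(
    #     test_claim_vector, test_allocated_res_table, test_maximum_claim_table
    # ).main(describe=True)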
| 350 |
import os
import re
import shutil
from argparse import ArgumentParser, Namespace
from datasets.commands import BaseDatasetsCLICommand
from datasets.utils.logging import get_logger
lowerCAmelCase = '''<<<<<<< This should probably be modified because it mentions: '''
lowerCAmelCase = '''=======
>>>>>>>
'''
lowerCAmelCase = [
'''TextEncoderConfig''',
'''ByteTextEncoder''',
'''SubwordTextEncoder''',
'''encoder_config''',
'''maybe_build_from_corpus''',
'''manual_dir''',
]
lowerCAmelCase = [
# (pattern, replacement)
# Order is important here for some replacements
(R'''tfds\.core''', R'''datasets'''),
(R'''tf\.io\.gfile\.GFile''', R'''open'''),
(R'''tf\.([\w\d]+)''', R'''datasets.Value(\'\1\')'''),
(R'''tfds\.features\.Text\(\)''', R'''datasets.Value(\'string\')'''),
(R'''tfds\.features\.Text\(''', R'''datasets.Value(\'string\'),'''),
(R'''features\s*=\s*tfds.features.FeaturesDict\(''', R'''features=datasets.Features('''),
(R'''tfds\.features\.FeaturesDict\(''', R'''dict('''),
(R'''The TensorFlow Datasets Authors''', R'''The TensorFlow Datasets Authors and the HuggingFace Datasets Authors'''),
(R'''tfds\.''', R'''datasets.'''),
(R'''dl_manager\.manual_dir''', R'''self.config.data_dir'''),
(R'''self\.builder_config''', R'''self.config'''),
]
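# The patterns above are applied in order, e.g. `tf.io.gfile.GFile(path)` becomes
# `open(path)` and `tfds.features.Text()` becomes `datasets.Value('string')`.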
def _lowerCamelCase( lowercase__ ) -> Optional[int]:
'''simple docstring'''
return ConvertCommand(args.tfds_path , args.datasets_directory )
class A ( A_ ):
@staticmethod
def _A (lowerCAmelCase ):
__lowercase= parser.add_parser(
'convert' , help='Convert a TensorFlow Datasets dataset to a HuggingFace Datasets dataset.' , )
train_parser.add_argument(
'--tfds_path' , type=lowerCAmelCase , required=lowerCAmelCase , help='Path to a TensorFlow Datasets folder to convert or a single tfds file to convert.' , )
train_parser.add_argument(
'--datasets_directory' , type=lowerCAmelCase , required=lowerCAmelCase , help='Path to the HuggingFace Datasets folder.' )
train_parser.set_defaults(func=lowerCAmelCase )
def __init__(self , lowerCAmelCase , lowerCAmelCase , *lowerCAmelCase ):
__lowercase= get_logger('datasets-cli/converting' )
__lowercase= tfds_path
__lowercase= datasets_directory
def _A (self ):
if os.path.isdir(self._tfds_path ):
__lowercase= os.path.abspath(self._tfds_path )
elif os.path.isfile(self._tfds_path ):
__lowercase= os.path.dirname(self._tfds_path )
else:
raise ValueError('--tfds_path is neither a directory nor a file. Please check path.' )
__lowercase= os.path.abspath(self._datasets_directory )
self._logger.info(f'Converting datasets from {abs_tfds_path} to {abs_datasets_path}' )
__lowercase= []
__lowercase= []
__lowercase= {}
if os.path.isdir(self._tfds_path ):
__lowercase= os.listdir(lowerCAmelCase )
else:
__lowercase= [os.path.basename(self._tfds_path )]
for f_name in file_names:
self._logger.info(f'Looking at file {f_name}' )
__lowercase= os.path.join(lowerCAmelCase , lowerCAmelCase )
__lowercase= os.path.join(lowerCAmelCase , lowerCAmelCase )
if not os.path.isfile(lowerCAmelCase ) or "__init__" in f_name or "_test" in f_name or ".py" not in f_name:
self._logger.info('Skipping file' )
continue
with open(lowerCAmelCase , encoding='utf-8' ) as f:
__lowercase= f.readlines()
__lowercase= []
__lowercase= False
__lowercase= False
__lowercase= []
for line in lines:
__lowercase= line
# Convert imports
if "import tensorflow.compat.v2 as tf" in out_line:
continue
elif "@tfds.core" in out_line:
continue
elif "builder=self" in out_line:
continue
elif "import tensorflow_datasets.public_api as tfds" in out_line:
__lowercase= 'import datasets\n'
elif "import tensorflow" in out_line:
# order is important here
__lowercase= ''
continue
elif "from absl import logging" in out_line:
__lowercase= 'from datasets import logging\n'
elif "getLogger" in out_line:
__lowercase= out_line.replace('getLogger' , 'get_logger' )
elif any(expression in out_line for expression in TO_HIGHLIGHT ):
__lowercase= True
__lowercase= list(filter(lambda lowerCAmelCase : e in out_line , lowerCAmelCase ) )
out_lines.append(HIGHLIGHT_MESSAGE_PRE + str(lowerCAmelCase ) + '\n' )
out_lines.append(lowerCAmelCase )
out_lines.append(lowerCAmelCase )
continue
else:
for pattern, replacement in TO_CONVERT:
__lowercase= re.sub(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase )
# Take care of saving utilities (to later move them together with main script)
if "tensorflow_datasets" in out_line:
__lowercase= re.match(r'from\stensorflow_datasets.*import\s([^\.\r\n]+)' , lowerCAmelCase )
tfds_imports.extend(imp.strip() for imp in match.group(1 ).split(',' ) )
__lowercase= 'from . import ' + match.group(1 )
                # Check we have not forgotten anything
if "tf." in out_line or "tfds." in out_line or "tensorflow_datasets" in out_line:
raise ValueError(f'Error converting {out_line.strip()}' )
if "GeneratorBasedBuilder" in out_line or "BeamBasedBuilder" in out_line:
__lowercase= True
out_lines.append(lowerCAmelCase )
if is_builder or "wmt" in f_name:
# We create a new directory for each dataset
__lowercase= f_name.replace('.py' , '' )
__lowercase= os.path.join(lowerCAmelCase , lowerCAmelCase )
__lowercase= os.path.join(lowerCAmelCase , lowerCAmelCase )
os.makedirs(lowerCAmelCase , exist_ok=lowerCAmelCase )
self._logger.info(f'Adding directory {output_dir}' )
imports_to_builder_map.update({imp: output_dir for imp in tfds_imports} )
else:
# Utilities will be moved at the end
utils_files.append(lowerCAmelCase )
if needs_manual_update:
with_manual_update.append(lowerCAmelCase )
with open(lowerCAmelCase , 'w' , encoding='utf-8' ) as f:
f.writelines(lowerCAmelCase )
self._logger.info(f'Converted in {output_file}' )
for utils_file in utils_files:
try:
__lowercase= os.path.basename(lowerCAmelCase )
__lowercase= imports_to_builder_map[f_name.replace('.py' , '' )]
self._logger.info(f'Moving {dest_folder} to {utils_file}' )
shutil.copy(lowerCAmelCase , lowerCAmelCase )
except KeyError:
self._logger.error(f'Cannot find destination folder for {utils_file}. Please copy manually.' )
if with_manual_update:
for file_path in with_manual_update:
self._logger.warning(
f'You need to manually update file {file_path} to remove configurations using \'TextEncoderConfig\'.' )
| 304 | 0 |
import math
class Graph:
    def __init__(self, n=0):  # a graph with Node 0,1,...,N-1
        self.n = n
        self.w = [
            [math.inf for j in range(0, n)] for i in range(0, n)
        ]  # adjacency matrix for weight
        self.dp = [
            [math.inf for j in range(0, n)] for i in range(0, n)
        ]  # dp[i][j] stores minimum distance from i to j
    def add_edge(self, u, v, w):
        self.dp[u][v] = w
    def floyd_warshall(self):
        # classic O(n^3) relaxation: allow intermediate nodes 0..k one at a time
        for k in range(0, self.n):
            for i in range(0, self.n):
                for j in range(0, self.n):
                    self.dp[i][j] = min(self.dp[i][j], self.dp[i][k] + self.dp[k][j])
    def show_min(self, u, v):
        return self.dp[u][v]
if __name__ == "__main__":
    graph = Graph(5)
graph.add_edge(0, 2, 9)
graph.add_edge(0, 4, 10)
graph.add_edge(1, 3, 5)
graph.add_edge(2, 3, 7)
graph.add_edge(3, 0, 10)
graph.add_edge(3, 1, 2)
graph.add_edge(3, 2, 1)
graph.add_edge(3, 4, 6)
graph.add_edge(4, 1, 3)
graph.add_edge(4, 2, 4)
graph.add_edge(4, 3, 9)
graph.floyd_warshall()
graph.show_min(1, 4)
graph.show_min(0, 3)
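    # show_min returns the distance instead of printing it, so the two calls above
    # discard their results; wrap them in print() to inspect them. For this graph
    # the shortest distances should be 11 (1 -> 3 -> 4) and 16 (0 -> 2 -> 3).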
| 201 |
import os
from glob import glob
import imageio
import torch
import torchvision
import wandb
from img_processing import custom_to_pil, loop_post_process, preprocess, preprocess_vqgan
from loaders import load_vqgan
from PIL import Image
from torch import nn
from transformers import CLIPModel, CLIPTokenizerFast
from utils import get_device, get_timestamp, show_pil
class lowercase__ :
'''simple docstring'''
def __init__( self, __magic_name__ = "cpu", __magic_name__ = "openai/clip-vit-large-patch14" ) -> None:
"""simple docstring"""
UpperCamelCase__ : List[str] = device
UpperCamelCase__ : Union[str, Any] = CLIPTokenizerFast.from_pretrained(__magic_name__ )
UpperCamelCase__ : Tuple = [0.4814_5466, 0.457_8275, 0.4082_1073]
UpperCamelCase__ : Union[str, Any] = [0.2686_2954, 0.2613_0258, 0.2757_7711]
UpperCamelCase__ : Dict = torchvision.transforms.Normalize(self.image_mean, self.image_std )
UpperCamelCase__ : List[str] = torchvision.transforms.Resize(224 )
UpperCamelCase__ : Union[str, Any] = torchvision.transforms.CenterCrop(224 )
def UpperCamelCase__ ( self, __magic_name__ ) -> List[Any]:
"""simple docstring"""
UpperCamelCase__ : Optional[Any] = self.resize(__magic_name__ )
UpperCamelCase__ : Dict = self.center_crop(__magic_name__ )
UpperCamelCase__ : List[str] = self.normalize(__magic_name__ )
return images
def __call__( self, __magic_name__=None, __magic_name__=None, **__magic_name__ ) -> Union[str, Any]:
"""simple docstring"""
UpperCamelCase__ : Optional[Any] = self.tokenizer(text=__magic_name__, **__magic_name__ )
UpperCamelCase__ : List[Any] = self.preprocess_img(__magic_name__ )
UpperCamelCase__ : Optional[Any] = {key: value.to(self.device ) for (key, value) in encoding.items()}
return encoding
class lowercase__ ( nn.Module ):
'''simple docstring'''
def __init__( self, __magic_name__=10, __magic_name__=0.01, __magic_name__=None, __magic_name__=None, __magic_name__=None, __magic_name__=None, __magic_name__=None, __magic_name__=None, __magic_name__=False, __magic_name__=True, __magic_name__="image", __magic_name__=True, __magic_name__=False, __magic_name__=False, __magic_name__=False, ) -> None:
"""simple docstring"""
super().__init__()
UpperCamelCase__ : Dict = None
UpperCamelCase__ : Tuple = device if device else get_device()
if vqgan:
UpperCamelCase__ : Union[str, Any] = vqgan
else:
UpperCamelCase__ : Any = load_vqgan(self.device, conf_path=__magic_name__, ckpt_path=__magic_name__ )
self.vqgan.eval()
if clip:
UpperCamelCase__ : Optional[Any] = clip
else:
UpperCamelCase__ : Any = CLIPModel.from_pretrained('''openai/clip-vit-base-patch32''' )
self.clip.to(self.device )
UpperCamelCase__ : str = ProcessorGradientFlow(device=self.device )
UpperCamelCase__ : Union[str, Any] = iterations
UpperCamelCase__ : Tuple = lr
UpperCamelCase__ : Optional[int] = log
UpperCamelCase__ : List[Any] = make_grid
UpperCamelCase__ : Optional[Any] = return_val
UpperCamelCase__ : str = quantize
UpperCamelCase__ : int = self.vqgan.decoder.z_shape
def UpperCamelCase__ ( self, __magic_name__=None, __magic_name__=None, __magic_name__=5, __magic_name__=True ) -> Optional[Any]:
"""simple docstring"""
UpperCamelCase__ : Optional[int] = []
if output_path is None:
UpperCamelCase__ : List[str] = '''./animation.gif'''
if input_path is None:
UpperCamelCase__ : Union[str, Any] = self.save_path
UpperCamelCase__ : Tuple = sorted(glob(input_path + '''/*''' ) )
if not len(__magic_name__ ):
raise ValueError(
'''No images found in save path, aborting (did you pass save_intermediate=True to the generate'''
''' function?)''' )
if len(__magic_name__ ) == 1:
print('''Only one image found in save path, (did you pass save_intermediate=True to the generate function?)''' )
UpperCamelCase__ : Dict = total_duration / len(__magic_name__ )
UpperCamelCase__ : List[Any] = [frame_duration] * len(__magic_name__ )
if extend_frames:
UpperCamelCase__ : List[Any] = 1.5
UpperCamelCase__ : Any = 3
for file_name in paths:
if file_name.endswith('''.png''' ):
images.append(imageio.imread(__magic_name__ ) )
imageio.mimsave(__magic_name__, __magic_name__, duration=__magic_name__ )
print(f"gif saved to {output_path}" )
def UpperCamelCase__ ( self, __magic_name__=None, __magic_name__=None ) -> Any:
"""simple docstring"""
if not (path or img):
raise ValueError('''Input either path or tensor''' )
if img is not None:
raise NotImplementedError
UpperCamelCase__ : List[Any] = preprocess(Image.open(__magic_name__ ), target_image_size=256 ).to(self.device )
UpperCamelCase__ : str = preprocess_vqgan(__magic_name__ )
UpperCamelCase__ ,*UpperCamelCase__ : Union[str, Any] = self.vqgan.encode(__magic_name__ )
return z
def UpperCamelCase__ ( self, __magic_name__ ) -> Any:
"""simple docstring"""
UpperCamelCase__ : Optional[Any] = self.latent.detach().requires_grad_()
UpperCamelCase__ : Any = base_latent + transform_vector
if self.quantize:
UpperCamelCase__ ,*UpperCamelCase__ : int = self.vqgan.quantize(__magic_name__ )
else:
UpperCamelCase__ : Optional[int] = trans_latent
return self.vqgan.decode(__magic_name__ )
def UpperCamelCase__ ( self, __magic_name__, __magic_name__, __magic_name__=None ) -> Tuple:
"""simple docstring"""
UpperCamelCase__ : Optional[int] = self.clip_preprocessor(text=__magic_name__, images=__magic_name__, return_tensors='''pt''', padding=__magic_name__ )
UpperCamelCase__ : Optional[int] = self.clip(**__magic_name__ )
UpperCamelCase__ : Tuple = clip_outputs.logits_per_image
if weights is not None:
UpperCamelCase__ : List[Any] = similarity_logits * weights
return similarity_logits.sum()
def UpperCamelCase__ ( self, __magic_name__, __magic_name__, __magic_name__ ) -> Any:
"""simple docstring"""
UpperCamelCase__ : List[str] = self._get_clip_similarity(pos_prompts['''prompts'''], __magic_name__, weights=(1 / pos_prompts['''weights''']) )
if neg_prompts:
UpperCamelCase__ : Tuple = self._get_clip_similarity(neg_prompts['''prompts'''], __magic_name__, weights=neg_prompts['''weights'''] )
else:
UpperCamelCase__ : Optional[int] = torch.tensor([1], device=self.device )
UpperCamelCase__ : Tuple = -torch.log(__magic_name__ ) + torch.log(__magic_name__ )
return loss
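        # The loss is -log(pos_similarity) + log(neg_similarity): minimizing it
        # pushes the image toward the positive prompts and away from the negatives.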
def UpperCamelCase__ ( self, __magic_name__, __magic_name__, __magic_name__ ) -> Optional[Any]:
"""simple docstring"""
UpperCamelCase__ : List[str] = torch.randn_like(self.latent, requires_grad=__magic_name__, device=self.device )
UpperCamelCase__ : Optional[int] = torch.optim.Adam([vector], lr=self.lr )
for i in range(self.iterations ):
optim.zero_grad()
UpperCamelCase__ : Tuple = self._add_vector(__magic_name__ )
UpperCamelCase__ : Any = loop_post_process(__magic_name__ )
UpperCamelCase__ : Union[str, Any] = self._get_CLIP_loss(__magic_name__, __magic_name__, __magic_name__ )
print('''CLIP loss''', __magic_name__ )
if self.log:
wandb.log({'''CLIP Loss''': clip_loss} )
clip_loss.backward(retain_graph=__magic_name__ )
optim.step()
if self.return_val == "image":
yield custom_to_pil(transformed_img[0] )
else:
yield vector
def UpperCamelCase__ ( self, __magic_name__, __magic_name__, __magic_name__ ) -> List[str]:
"""simple docstring"""
wandb.init(reinit=__magic_name__, project='''face-editor''' )
wandb.config.update({'''Positive Prompts''': positive_prompts} )
wandb.config.update({'''Negative Prompts''': negative_prompts} )
wandb.config.update({'''lr''': self.lr, '''iterations''': self.iterations} )
if image_path:
UpperCamelCase__ : List[str] = Image.open(__magic_name__ )
UpperCamelCase__ : List[Any] = image.resize((256, 256) )
wandb.log('''Original Image''', wandb.Image(__magic_name__ ) )
def UpperCamelCase__ ( self, __magic_name__ ) -> Optional[int]:
"""simple docstring"""
if not prompts:
return []
UpperCamelCase__ : int = []
UpperCamelCase__ : str = []
if isinstance(__magic_name__, __magic_name__ ):
UpperCamelCase__ : Optional[Any] = [prompt.strip() for prompt in prompts.split('''|''' )]
for prompt in prompts:
if isinstance(__magic_name__, (tuple, list) ):
UpperCamelCase__ : Optional[int] = prompt[0]
UpperCamelCase__ : Dict = float(prompt[1] )
elif ":" in prompt:
UpperCamelCase__ ,UpperCamelCase__ : Optional[int] = prompt.split(''':''' )
UpperCamelCase__ : List[Any] = float(__magic_name__ )
else:
UpperCamelCase__ : List[str] = prompt
UpperCamelCase__ : Any = 1.0
processed_prompts.append(__magic_name__ )
weights.append(__magic_name__ )
return {
"prompts": processed_prompts,
"weights": torch.tensor(__magic_name__, device=self.device ),
}
def UpperCamelCase__ ( self, __magic_name__, __magic_name__=None, __magic_name__=None, __magic_name__=True, __magic_name__=False, __magic_name__=True, __magic_name__=True, __magic_name__=None, ) -> str:
"""simple docstring"""
if image_path:
UpperCamelCase__ : Union[str, Any] = self._get_latent(__magic_name__ )
else:
UpperCamelCase__ : Dict = torch.randn(self.latent_dim, device=self.device )
if self.log:
self._init_logging(__magic_name__, __magic_name__, __magic_name__ )
assert pos_prompts, "You must provide at least one positive prompt."
UpperCamelCase__ : Optional[Any] = self.process_prompts(__magic_name__ )
UpperCamelCase__ : Union[str, Any] = self.process_prompts(__magic_name__ )
if save_final and save_path is None:
UpperCamelCase__ : str = os.path.join('''./outputs/''', '''_'''.join(pos_prompts['''prompts'''] ) )
if not os.path.exists(__magic_name__ ):
os.makedirs(__magic_name__ )
else:
UpperCamelCase__ : int = save_path + '''_''' + get_timestamp()
os.makedirs(__magic_name__ )
UpperCamelCase__ : Optional[Any] = save_path
UpperCamelCase__ : str = self.vqgan.decode(self.latent )[0]
if show_intermediate:
print('''Original Image''' )
show_pil(custom_to_pil(__magic_name__ ) )
UpperCamelCase__ : Optional[Any] = loop_post_process(__magic_name__ )
for iter, transformed_img in enumerate(self._optimize_CLIP(__magic_name__, __magic_name__, __magic_name__ ) ):
if show_intermediate:
show_pil(__magic_name__ )
if save_intermediate:
transformed_img.save(os.path.join(self.save_path, f"iter_{iter:03d}.png" ) )
if self.log:
wandb.log({'''Image''': wandb.Image(__magic_name__ )} )
if show_final:
show_pil(__magic_name__ )
if save_final:
transformed_img.save(os.path.join(self.save_path, f"iter_{iter:03d}_final.png" ) )
| 201 | 1 |
'''simple docstring'''
import copy
from typing import Dict, Optional
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
from ..detr import DetrConfig
from ..swin import SwinConfig
lowerCamelCase_ = {
'''facebook/maskformer-swin-base-ade''': (
'''https://huggingface.co/facebook/maskformer-swin-base-ade/blob/main/config.json'''
)
# See all MaskFormer models at https://huggingface.co/models?filter=maskformer
}
lowerCamelCase_ = logging.get_logger(__name__)
class _UpperCAmelCase ( snake_case_ ):
"""simple docstring"""
snake_case = '''maskformer'''
snake_case = {'''hidden_size''': '''mask_feature_size'''}
snake_case = ['''resnet''', '''swin''']
snake_case = ['''detr''']
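    # The hidden_size -> mask_feature_size mapping above lets generic code that
    # reads `config.hidden_size` resolve to the mask feature dimension.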
def __init__( self : Union[str, Any] , __UpperCAmelCase : int = 256 , __UpperCAmelCase : int = 256 , __UpperCAmelCase : float = 0.1 , __UpperCAmelCase : bool = False , __UpperCAmelCase : Optional[Dict] = None , __UpperCAmelCase : Optional[Dict] = None , __UpperCAmelCase : float = 0.02 , __UpperCAmelCase : float = 1.0 , __UpperCAmelCase : float = 1.0 , __UpperCAmelCase : float = 1.0 , __UpperCAmelCase : float = 20.0 , __UpperCAmelCase : Optional[bool] = None , **__UpperCAmelCase : Tuple , ):
'''simple docstring'''
if backbone_config is None:
# fall back to https://huggingface.co/microsoft/swin-base-patch4-window12-384-in22k
_A = SwinConfig(
image_size=384 , in_channels=3 , patch_size=4 , embed_dim=128 , depths=[2, 2, 18, 2] , num_heads=[4, 8, 16, 32] , window_size=12 , drop_path_rate=0.3 , out_features=["stage1", "stage2", "stage3", "stage4"] , )
if isinstance(__UpperCAmelCase , __UpperCAmelCase ):
_A = backbone_config.pop("model_type" )
_A = CONFIG_MAPPING[backbone_model_type]
_A = config_class.from_dict(__UpperCAmelCase )
# verify that the backbone is supported
if backbone_config.model_type not in self.backbones_supported:
logger.warning_once(
f'''Backbone {backbone_config.model_type} is not a supported model and may not be compatible with MaskFormer. '''
f'''Supported model types: {','.join(self.backbones_supported )}''' )
if decoder_config is None:
# fall back to https://huggingface.co/facebook/detr-resnet-50
_A = DetrConfig()
else:
# verify that the decoder is supported
_A = (
decoder_config.pop("model_type" ) if isinstance(__UpperCAmelCase , __UpperCAmelCase ) else decoder_config.model_type
)
if decoder_type not in self.decoders_supported:
raise ValueError(
f'''Transformer Decoder {decoder_type} not supported, please use one of'''
f''' {','.join(self.decoders_supported )}''' )
if isinstance(__UpperCAmelCase , __UpperCAmelCase ):
_A = CONFIG_MAPPING[decoder_type]
_A = config_class.from_dict(__UpperCAmelCase )
_A = backbone_config
_A = decoder_config
# main feature dimension for the model
_A = fpn_feature_size
_A = mask_feature_size
# initializer
_A = init_std
_A = init_xavier_std
# Hungarian matcher && loss
_A = cross_entropy_weight
_A = dice_weight
_A = mask_weight
_A = use_auxiliary_loss
_A = no_object_weight
_A = output_auxiliary_logits
_A = self.decoder_config.encoder_attention_heads
_A = self.decoder_config.num_hidden_layers
super().__init__(**__UpperCAmelCase )
@classmethod
def lowerCAmelCase ( cls : List[Any] , __UpperCAmelCase : PretrainedConfig , __UpperCAmelCase : PretrainedConfig , **__UpperCAmelCase : List[Any] ):
'''simple docstring'''
return cls(
backbone_config=__UpperCAmelCase , decoder_config=__UpperCAmelCase , **__UpperCAmelCase , )
def lowerCAmelCase ( self : Union[str, Any] ):
'''simple docstring'''
_A = copy.deepcopy(self.__dict__ )
_A = self.backbone_config.to_dict()
_A = self.decoder_config.to_dict()
_A = self.__class__.model_type
return output
| 174 |
'''simple docstring'''
from collections import defaultdict
from math import gcd
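# Euclid's formula generates every primitive Pythagorean triple from coprime
# m > n of opposite parity: legs m^2 - n^2 and 2mn, hypotenuse m^2 + n^2, hence
# perimeter 2m(m + n). Counting all multiples of each primitive perimeter up to
# the limit and keeping the perimeters hit exactly once gives the answer.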
def solution(limit: int = 150_0000) -> int:
    '''Count perimeters below `limit` that admit exactly one Pythagorean triple.'''
    frequencies = defaultdict(int)
    euclid_m = 2
    while 2 * euclid_m * (euclid_m + 1) <= limit:
        for euclid_n in range((euclid_m % 2) + 1 , euclid_m , 2 ):
            if gcd(euclid_m , euclid_n ) > 1:
                continue
            primitive_perimeter = 2 * euclid_m * (euclid_m + euclid_n)
            for perimeter in range(primitive_perimeter , limit + 1 , primitive_perimeter ):
                frequencies[perimeter] += 1
        euclid_m += 1
    return sum(1 for frequency in frequencies.values() if frequency == 1 )
if __name__ == "__main__":
print(F"""{solution() = }""")
| 174 | 1 |
'''simple docstring'''
import platform
from argparse import ArgumentParser
import huggingface_hub
from .. import __version__ as version
from ..utils import is_accelerate_available, is_torch_available, is_transformers_available, is_xformers_available
from . import BaseDiffusersCLICommand
def info_command_factory(_):
    return EnvironmentCommand()
class EnvironmentCommand(BaseDiffusersCLICommand):
    @staticmethod
    def register_subcommand(parser):
        download_parser = parser.add_parser("env" )
        download_parser.set_defaults(func=info_command_factory )
    def run(self):
        hub_version = huggingface_hub.__version__
        pt_version = "not installed"
        pt_cuda_available = "NA"
        if is_torch_available():
            import torch
            pt_version = torch.__version__
            pt_cuda_available = torch.cuda.is_available()
        transformers_version = "not installed"
        if is_transformers_available():
            import transformers
            transformers_version = transformers.__version__
        accelerate_version = "not installed"
        if is_accelerate_available():
            import accelerate
            accelerate_version = accelerate.__version__
        xformers_version = "not installed"
        if is_xformers_available():
            import xformers
            xformers_version = xformers.__version__
        info = {
            "`diffusers` version": version,
            "Platform": platform.platform(),
            "Python version": platform.python_version(),
            "PyTorch version (GPU?)": F'{pt_version} ({pt_cuda_available})',
            "Huggingface_hub version": hub_version,
            "Transformers version": transformers_version,
            "Accelerate version": accelerate_version,
            "xFormers version": xformers_version,
            "Using GPU in script?": "<fill in>",
            "Using distributed or parallel set-up in script?": "<fill in>",
        }
        print("\nCopy-and-paste the text below in your GitHub issue and FILL OUT the two last points.\n" )
        print(self.format_dict(info ) )
        return info
    @staticmethod
    def format_dict(d):
        return "\n".join([F'- {prop}: {val}' for prop, val in d.items()] ) + "\n"
| 85 |
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCAmelCase__ = logging.get_logger(__name__)
UpperCAmelCase__ = {
'''microsoft/wavlm-base''': '''https://huggingface.co/microsoft/wavlm-base/resolve/main/config.json''',
# See all WavLM models at https://huggingface.co/models?filter=wavlm
}
class lowerCamelCase__ ( lowerCAmelCase):
SCREAMING_SNAKE_CASE__ = '''wavlm'''
def __init__(self , UpperCAmelCase=3_2 , UpperCAmelCase=7_6_8 , UpperCAmelCase=1_2 , UpperCAmelCase=1_2 , UpperCAmelCase=3_0_7_2 , UpperCAmelCase="gelu" , UpperCAmelCase=0.1 , UpperCAmelCase=0.1 , UpperCAmelCase=0.1 , UpperCAmelCase=0.0 , UpperCAmelCase=0.1 , UpperCAmelCase=0.1 , UpperCAmelCase=0.02 , UpperCAmelCase=1e-5 , UpperCAmelCase="group" , UpperCAmelCase="gelu" , UpperCAmelCase=(5_1_2, 5_1_2, 5_1_2, 5_1_2, 5_1_2, 5_1_2, 5_1_2) , UpperCAmelCase=(5, 2, 2, 2, 2, 2, 2) , UpperCAmelCase=(1_0, 3, 3, 3, 3, 2, 2) , UpperCAmelCase=False , UpperCAmelCase=1_2_8 , UpperCAmelCase=1_6 , UpperCAmelCase=3_2_0 , UpperCAmelCase=8_0_0 , UpperCAmelCase=False , UpperCAmelCase=True , UpperCAmelCase=0.05 , UpperCAmelCase=1_0 , UpperCAmelCase=2 , UpperCAmelCase=0.0 , UpperCAmelCase=1_0 , UpperCAmelCase=3_2_0 , UpperCAmelCase=2 , UpperCAmelCase=0.1 , UpperCAmelCase=1_0_0 , UpperCAmelCase=2_5_6 , UpperCAmelCase=2_5_6 , UpperCAmelCase=0.1 , UpperCAmelCase="mean" , UpperCAmelCase=False , UpperCAmelCase=False , UpperCAmelCase=2_5_6 , UpperCAmelCase=(5_1_2, 5_1_2, 5_1_2, 5_1_2, 1_5_0_0) , UpperCAmelCase=(5, 3, 3, 1, 1) , UpperCAmelCase=(1, 2, 3, 1, 1) , UpperCAmelCase=5_1_2 , UpperCAmelCase=8_0 , UpperCAmelCase=0 , UpperCAmelCase=1 , UpperCAmelCase=2 , UpperCAmelCase=False , UpperCAmelCase=3 , UpperCAmelCase=2 , UpperCAmelCase=3 , UpperCAmelCase=None , **UpperCAmelCase , ) -> Optional[Any]:
super().__init__(**UpperCAmelCase , pad_token_id=UpperCAmelCase , bos_token_id=UpperCAmelCase , eos_token_id=UpperCAmelCase )
_lowercase =hidden_size
_lowercase =feat_extract_norm
_lowercase =feat_extract_activation
_lowercase =list(UpperCAmelCase )
_lowercase =list(UpperCAmelCase )
_lowercase =list(UpperCAmelCase )
_lowercase =conv_bias
_lowercase =num_buckets
_lowercase =max_bucket_distance
_lowercase =num_conv_pos_embeddings
_lowercase =num_conv_pos_embedding_groups
_lowercase =len(self.conv_dim )
_lowercase =num_hidden_layers
_lowercase =intermediate_size
_lowercase =hidden_act
_lowercase =num_attention_heads
_lowercase =hidden_dropout
_lowercase =attention_dropout
_lowercase =activation_dropout
_lowercase =feat_proj_dropout
_lowercase =final_dropout
_lowercase =layerdrop
_lowercase =layer_norm_eps
_lowercase =initializer_range
_lowercase =num_ctc_classes
_lowercase =vocab_size
_lowercase =do_stable_layer_norm
_lowercase =use_weighted_layer_sum
_lowercase =classifier_proj_size
if (
(len(self.conv_stride ) != self.num_feat_extract_layers)
or (len(self.conv_kernel ) != self.num_feat_extract_layers)
or (len(self.conv_dim ) != self.num_feat_extract_layers)
):
raise ValueError(
'''Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =='''
''' `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ='''
f" {len(self.conv_dim )}`, `len(config.conv_stride) = {len(self.conv_stride )}`,"
f" `len(config.conv_kernel) = {len(self.conv_kernel )}`." )
# fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
_lowercase =apply_spec_augment
_lowercase =mask_time_prob
_lowercase =mask_time_length
_lowercase =mask_time_min_masks
_lowercase =mask_feature_prob
_lowercase =mask_feature_length
# parameters for pretraining with codevector quantized representations
_lowercase =num_codevectors_per_group
_lowercase =num_codevector_groups
_lowercase =contrastive_logits_temperature
_lowercase =num_negatives
_lowercase =codevector_dim
_lowercase =proj_codevector_dim
_lowercase =diversity_loss_weight
# ctc loss
_lowercase =ctc_loss_reduction
_lowercase =ctc_zero_infinity
# adapter
_lowercase =add_adapter
_lowercase =adapter_kernel_size
_lowercase =adapter_stride
_lowercase =num_adapter_layers
_lowercase =output_hidden_size or hidden_size
# SequenceClassification-specific parameter. Feel free to ignore for other classes.
_lowercase =classifier_proj_size
# XVector-specific parameters. Feel free to ignore for other classes.
_lowercase =list(UpperCAmelCase )
_lowercase =list(UpperCAmelCase )
_lowercase =list(UpperCAmelCase )
_lowercase =xvector_output_dim
@property
def __A (self ) -> int:
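        # Product of all convolutional strides, i.e. how many raw input samples
        # are collapsed into a single frame of encoder output.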
return functools.reduce(operator.mul , self.conv_stride , 1 )
| 5 | 0 |
'''simple docstring'''
from __future__ import annotations
def prime_sieve(limit):
    # sieve of Eratosthenes, scanning the odd numbers only
    is_prime = [True] * limit
    is_prime[0] = False
    is_prime[1] = False
    is_prime[2] = True
    for i in range(3 , int(limit**0.5 + 1 ) , 2 ):
        index = i * 2
        while index < limit:
            is_prime[index] = False
            index = index + i
    primes = [2]
    for i in range(3 , limit , 2 ):
        if is_prime[i]:
            primes.append(i )
    return primes
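# Search for the prime below the ceiling expressible as the sum of the most
# consecutive primes (Project Euler 50); `length` tracks the best run found so
# far, so the inner scan can start at i + length and skip shorter candidates.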
def solution(ceiling=100_0000):
    primes = prime_sieve(ceiling )
    length = 0
    largest = 0
    for i in range(len(primes ) ):
        for j in range(i + length , len(primes ) ):
            sol = sum(primes[i:j] )
            if sol >= ceiling:
                break
            if sol in primes:  # note: list membership is O(n); a set would be faster
                length = j - i
                largest = sol
    return largest
if __name__ == "__main__":
    print(f'''{solution() = }''')
| 270 |
'''simple docstring'''
from math import sqrt
def sum_of_divisors(n):
    total = 0
    for i in range(1 , int(sqrt(n ) + 1 ) ):
        if n % i == 0 and i != sqrt(n ):
            total += i + n // i
        elif i == sqrt(n ):
            total += i
    return total - n
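# e.g. sum_of_divisors(220) == 284 and sum_of_divisors(284) == 220 (the classic
# amicable pair), so both 220 and 284 are counted by solution() below.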
def solution(limit=1_0000):
    total = sum(
        i
        for i in range(1 , limit )
        if sum_of_divisors(sum_of_divisors(i ) ) == i and sum_of_divisors(i ) != i )
    return total
if __name__ == "__main__":
    print(solution(int(str(input()).strip())))
| 270 | 1 |
import argparse
import json
import re
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
MobileNetVaConfig,
MobileNetVaForImageClassification,
MobileNetVaImageProcessor,
load_tf_weights_in_mobilenet_va,
)
from transformers.utils import logging
logging.set_verbosity_info()
__a = logging.get_logger(__name__)
def get_mobilenet_va_config(model_name):
    '''simple docstring'''
    config = MobileNetVaConfig(layer_norm_eps=0.0_0_1 )
    if "_quant" in model_name:
        raise ValueError('''Quantized models are not supported.''' )
    matches = re.match(r'''^mobilenet_v1_([^_]*)_([^_]*)$''' , model_name )
    if matches:
        config.depth_multiplier = float(matches[1] )
        config.image_size = int(matches[2] )
    # The TensorFlow version of MobileNetV1 predicts 1001 classes instead of
    # the usual 1000. The first class (index 0) is "background".
    config.num_labels = 1_001
    filename = '''imagenet-1k-id2label.json'''
    repo_id = '''huggingface/label-files'''
    idalabel = json.load(open(hf_hub_download(repo_id , filename , repo_type='''dataset''' ) , '''r''' ) )
    idalabel = {int(k ) + 1: v for k, v in idalabel.items()}
    idalabel[0] = '''background'''
    config.idalabel = idalabel
    config.labelaid = {v: k for k, v in idalabel.items()}
    return config
def prepare_img():
    '''simple docstring'''
    url = '''http://images.cocodataset.org/val2017/000000039769.jpg'''
    im = Image.open(requests.get(url , stream=True ).raw )
    return im
@torch.no_grad()
def convert_movilevit_checkpoint(model_name, checkpoint_path, pytorch_dump_folder_path, push_to_hub=False):
    '''simple docstring'''
    config = get_mobilenet_va_config(model_name )
    # Load 🤗 model
    model = MobileNetVaForImageClassification(config ).eval()
    # Load weights from TensorFlow checkpoint
    load_tf_weights_in_mobilenet_va(model , config , checkpoint_path )
    # Check outputs on an image, prepared by MobileNetV1ImageProcessor
    image_processor = MobileNetVaImageProcessor(
        crop_size={'''width''': config.image_size, '''height''': config.image_size} , size={'''shortest_edge''': config.image_size + 32} , )
    encoding = image_processor(images=prepare_img() , return_tensors='''pt''' )
    outputs = model(**encoding )
    logits = outputs.logits
    assert logits.shape == (1, 1_001)
    if model_name == "mobilenet_v1_1.0_224":
        expected_logits = torch.tensor([-4.1_7_3_9, -1.1_2_3_3, 3.1_2_0_5] )
    elif model_name == "mobilenet_v1_0.75_192":
        expected_logits = torch.tensor([-3.9_4_4_0, -2.3_1_4_1, -0.3_3_3_3] )
    else:
        expected_logits = None
    if expected_logits is not None:
        assert torch.allclose(logits[0, :3] , expected_logits , atol=1e-4 )
    Path(pytorch_dump_folder_path ).mkdir(exist_ok=True )
    print(F'''Saving model {model_name} to {pytorch_dump_folder_path}''' )
    model.save_pretrained(pytorch_dump_folder_path )
    print(F'''Saving image processor to {pytorch_dump_folder_path}''' )
    image_processor.save_pretrained(pytorch_dump_folder_path )
    if push_to_hub:
        print('''Pushing to the hub...''' )
        repo_id = '''google/''' + model_name
        image_processor.push_to_hub(repo_id )
        model.push_to_hub(repo_id )
if __name__ == "__main__":
__a = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--model_name',
default='mobilenet_v1_1.0_224',
type=str,
help='Name of the MobileNetV1 model you\'d like to convert. Should in the form \'mobilenet_v1_<depth>_<size>\'.',
)
parser.add_argument(
'--checkpoint_path', required=True, type=str, help='Path to the original TensorFlow checkpoint (.ckpt file).'
)
parser.add_argument(
'--pytorch_dump_folder_path', required=True, type=str, help='Path to the output PyTorch model directory.'
)
parser.add_argument(
'--push_to_hub', action='store_true', help='Whether or not to push the converted model to the 🤗 hub.'
)
__a = parser.parse_args()
convert_movilevit_checkpoint(
args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub
)
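    # Example invocation (hypothetical script name and paths):
    #   python convert_original_tf_checkpoint_to_pytorch.py \
    #       --model_name mobilenet_v1_1.0_224 \
    #       --checkpoint_path ./mobilenet_v1_1.0_224.ckpt \
    #       --pytorch_dump_folder_path ./mobilenet_v1_1.0_224_hf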
| 30 |
import string
def decrypt(message: str ) -> None:
    '''simple docstring'''
    # brute force: try every possible Caesar shift and print each candidate
    for key in range(len(string.ascii_uppercase ) ):
        translated = ""
        for symbol in message:
            if symbol in string.ascii_uppercase:
                num = string.ascii_uppercase.find(symbol )
                num = num - key
                if num < 0:
                    num = num + len(string.ascii_uppercase )
                translated = translated + string.ascii_uppercase[num]
            else:
                translated = translated + symbol
        print(F'Decryption using Key #{key}: {translated}' )
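# e.g. decrypt('OLSSV') prints all 26 candidates; the line for key #7 reads 'HELLO'.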
def main() -> None:
    '''simple docstring'''
    message = input("Encrypted message: " )
    message = message.upper()
    decrypt(message )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
| 68 | 0 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_snake_case = {
'configuration_biogpt': ['BIOGPT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'BioGptConfig'],
'tokenization_biogpt': ['BioGptTokenizer'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_snake_case = [
'BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST',
'BioGptForCausalLM',
'BioGptForTokenClassification',
'BioGptForSequenceClassification',
'BioGptModel',
'BioGptPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_biogpt import BIOGPT_PRETRAINED_CONFIG_ARCHIVE_MAP, BioGptConfig
from .tokenization_biogpt import BioGptTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_biogpt import (
BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST,
BioGptForCausalLM,
BioGptForSequenceClassification,
BioGptForTokenClassification,
BioGptModel,
BioGptPreTrainedModel,
)
else:
import sys
_snake_case = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 324 |
"""simple docstring"""
import logging
import math
import os
from dataclasses import dataclass, field
from glob import glob
from typing import Optional
from torch.utils.data import ConcatDataset
import transformers
from transformers import (
CONFIG_MAPPING,
MODEL_WITH_LM_HEAD_MAPPING,
AutoConfig,
AutoModelWithLMHead,
AutoTokenizer,
DataCollatorForLanguageModeling,
DataCollatorForPermutationLanguageModeling,
DataCollatorForWholeWordMask,
HfArgumentParser,
LineByLineTextDataset,
LineByLineWithRefDataset,
PreTrainedTokenizer,
TextDataset,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import is_main_process
_snake_case = logging.getLogger(__name__)
_snake_case = list(MODEL_WITH_LM_HEAD_MAPPING.keys())
_snake_case = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)
@dataclass
class UpperCamelCase :
UpperCamelCase : Optional[str] = field(
default=snake_case_ , metadata={
'''help''': (
'''The model checkpoint for weights initialization. Leave None if you want to train a model from'''
''' scratch.'''
)
} , )
UpperCamelCase : Optional[str] = field(
default=snake_case_ , metadata={'''help''': '''If training from scratch, pass a model type from the list: ''' + ''', '''.join(snake_case_ )} , )
UpperCamelCase : Optional[str] = field(
default=snake_case_ , metadata={'''help''': '''Pretrained config name or path if not the same as model_name'''} )
UpperCamelCase : Optional[str] = field(
default=snake_case_ , metadata={'''help''': '''Pretrained tokenizer name or path if not the same as model_name'''} )
UpperCamelCase : Optional[str] = field(
default=snake_case_ , metadata={'''help''': '''Where do you want to store the pretrained models downloaded from huggingface.co'''} , )
@dataclass
class UpperCamelCase :
UpperCamelCase : Optional[str] = field(
default=snake_case_ , metadata={'''help''': '''The input training data file (a text file).'''} )
UpperCamelCase : Optional[str] = field(
default=snake_case_ , metadata={
'''help''': (
'''The input training data files (multiple files in glob format). '''
'''Very often splitting large files to smaller files can prevent tokenizer going out of memory'''
)
} , )
UpperCamelCase : Optional[str] = field(
default=snake_case_ , metadata={'''help''': '''An optional input evaluation data file to evaluate the perplexity on (a text file).'''} , )
UpperCamelCase : Optional[str] = field(
default=snake_case_ , metadata={'''help''': '''An optional input train ref data file for whole word mask in Chinese.'''} , )
UpperCamelCase : Optional[str] = field(
default=snake_case_ , metadata={'''help''': '''An optional input eval ref data file for whole word mask in Chinese.'''} , )
UpperCamelCase : bool = field(
default=snake_case_ , metadata={'''help''': '''Whether distinct lines of text in the dataset are to be handled as distinct sequences.'''} , )
UpperCamelCase : bool = field(
default=snake_case_ , metadata={'''help''': '''Train with masked-language modeling loss instead of language modeling.'''} )
UpperCamelCase : bool = field(default=snake_case_ , metadata={'''help''': '''Whether ot not to use whole word mask.'''} )
UpperCamelCase : float = field(
default=0.1_5 , metadata={'''help''': '''Ratio of tokens to mask for masked language modeling loss'''} )
UpperCamelCase : float = field(
default=1 / 6 , metadata={
'''help''': (
'''Ratio of length of a span of masked tokens to surrounding context length for permutation language'''
''' modeling.'''
)
} , )
UpperCamelCase : int = field(
default=5 , metadata={'''help''': '''Maximum length of a span of masked tokens for permutation language modeling.'''} )
UpperCamelCase : int = field(
default=-1 , metadata={
'''help''': (
'''Optional input sequence length after tokenization.'''
'''The training dataset will be truncated in block of this size for training.'''
'''Default to the model max input length for single sentence inputs (take into account special tokens).'''
)
} , )
UpperCamelCase : bool = field(
default=snake_case_ , metadata={'''help''': '''Overwrite the cached training and evaluation sets'''} )
def lowerCAmelCase__ ( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ = False , UpperCamelCase__ = None , ):
'''simple docstring'''
def _dataset(UpperCamelCase__ , UpperCamelCase__=None ):
if args.line_by_line:
if ref_path is not None:
if not args.whole_word_mask or not args.mlm:
raise ValueError("""You need to set world whole masking and mlm to True for Chinese Whole Word Mask""" )
return LineByLineWithRefDataset(
tokenizer=UpperCamelCase__ , file_path=UpperCamelCase__ , block_size=args.block_size , ref_path=UpperCamelCase__ , )
return LineByLineTextDataset(tokenizer=UpperCamelCase__ , file_path=UpperCamelCase__ , block_size=args.block_size )
else:
return TextDataset(
tokenizer=UpperCamelCase__ , file_path=UpperCamelCase__ , block_size=args.block_size , overwrite_cache=args.overwrite_cache , cache_dir=UpperCamelCase__ , )
if evaluate:
return _dataset(args.eval_data_file , args.eval_ref_file )
elif args.train_data_files:
return ConcatDataset([_dataset(UpperCamelCase__ ) for f in glob(args.train_data_files )] )
else:
return _dataset(args.train_data_file , args.train_ref_file )
def main():
'''simple docstring'''
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
    model_args , data_args , training_args = parser.parse_args_into_dataclasses()
if data_args.eval_data_file is None and training_args.do_eval:
raise ValueError(
"""Cannot do evaluation without an evaluation data file. Either supply a file to --eval_data_file """
"""or remove the --do_eval argument.""" )
if (
os.path.exists(training_args.output_dir )
and os.listdir(training_args.output_dir )
and training_args.do_train
and not training_args.overwrite_output_dir
):
raise ValueError(
F"""Output directory ({training_args.output_dir}) already exists and is not empty. Use"""
""" --overwrite_output_dir to overcome.""" )
# Setup logging
logging.basicConfig(
format="""%(asctime)s - %(levelname)s - %(name)s - %(message)s""" , datefmt="""%m/%d/%Y %H:%M:%S""" , level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN , )
logger.warning(
"""Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s""" , training_args.local_rank , training_args.device , training_args.n_gpu , bool(training_args.local_rank != -1 ) , training_args.fpaa , )
# Set the verbosity to info of the Transformers logger (on main process only):
if is_main_process(training_args.local_rank ):
transformers.utils.logging.set_verbosity_info()
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
logger.info("""Training/evaluation parameters %s""" , UpperCamelCase__ )
# Set seed
set_seed(training_args.seed )
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
    if model_args.config_name:
        config = AutoConfig.from_pretrained(model_args.config_name , cache_dir=model_args.cache_dir )
    elif model_args.model_name_or_path:
        config = AutoConfig.from_pretrained(model_args.model_name_or_path , cache_dir=model_args.cache_dir )
    else:
        config = CONFIG_MAPPING[model_args.model_type]()
        logger.warning("""You are instantiating a new config instance from scratch.""" )
    if model_args.tokenizer_name:
        tokenizer = AutoTokenizer.from_pretrained(model_args.tokenizer_name , cache_dir=model_args.cache_dir )
    elif model_args.model_name_or_path:
        tokenizer = AutoTokenizer.from_pretrained(model_args.model_name_or_path , cache_dir=model_args.cache_dir )
    else:
        raise ValueError(
            """You are instantiating a new tokenizer from scratch. This is not supported, but you can do it from another"""
            """ script, save it, and load it from here, using --tokenizer_name""" )
    if model_args.model_name_or_path:
        model = AutoModelWithLMHead.from_pretrained(
            model_args.model_name_or_path , from_tf=bool(""".ckpt""" in model_args.model_name_or_path ) , config=config , cache_dir=model_args.cache_dir , )
    else:
        logger.info("""Training new model from scratch""" )
        model = AutoModelWithLMHead.from_config(config )
    model.resize_token_embeddings(len(tokenizer ) )
if config.model_type in ["bert", "roberta", "distilbert", "camembert"] and not data_args.mlm:
raise ValueError(
"""BERT and RoBERTa-like models do not have LM heads but masked LM heads. They must be run using the"""
"""--mlm flag (masked language modeling).""" )
    if data_args.block_size <= 0:
        data_args.block_size = tokenizer.max_len
        # Our input block size will be the max possible for the model
    else:
        data_args.block_size = min(data_args.block_size , tokenizer.max_len )
# Get datasets
    train_dataset = (
        get_dataset(data_args , tokenizer=tokenizer , cache_dir=model_args.cache_dir ) if training_args.do_train else None
    )
    eval_dataset = (
        get_dataset(data_args , tokenizer=tokenizer , evaluate=True , cache_dir=model_args.cache_dir )
        if training_args.do_eval
        else None
    )
    if config.model_type == "xlnet":
        data_collator = DataCollatorForPermutationLanguageModeling(
            tokenizer=tokenizer , plm_probability=data_args.plm_probability , max_span_length=data_args.max_span_length , )
    else:
        if data_args.mlm and data_args.whole_word_mask:
            data_collator = DataCollatorForWholeWordMask(
                tokenizer=tokenizer , mlm_probability=data_args.mlm_probability )
        else:
            data_collator = DataCollatorForLanguageModeling(
                tokenizer=tokenizer , mlm=data_args.mlm , mlm_probability=data_args.mlm_probability )
    # Initialize our Trainer
    trainer = Trainer(
        model=model , args=training_args , data_collator=data_collator , train_dataset=train_dataset , eval_dataset=eval_dataset , prediction_loss_only=True , )
# Training
    if training_args.do_train:
        model_path = (
            model_args.model_name_or_path
            if model_args.model_name_or_path is not None and os.path.isdir(model_args.model_name_or_path )
            else None
        )
        trainer.train(model_path=model_path )
trainer.save_model()
# For convenience, we also re-save the tokenizer to the same directory,
# so that you can share your model easily on huggingface.co/models =)
if trainer.is_world_master():
tokenizer.save_pretrained(training_args.output_dir )
# Evaluation
    results = {}
if training_args.do_eval:
logger.info("""*** Evaluate ***""" )
        eval_output = trainer.evaluate()
        perplexity = math.exp(eval_output["""eval_loss"""] )
        result = {"""perplexity""": perplexity}
        output_eval_file = os.path.join(training_args.output_dir , """eval_results_lm.txt""" )
        if trainer.is_world_master():
            with open(output_eval_file , """w""" ) as writer:
                logger.info("""***** Eval results *****""" )
                for key in sorted(result.keys() ):
                    logger.info(""" %s = %s""" , key , str(result[key] ) )
                    writer.write("""%s = %s\n""" % (key, str(result[key] )) )
        results.update(result )
return results
def _mp_fn(index ):
'''simple docstring'''
# For xla_spawn (TPUs)
main()
if __name__ == "__main__":
main()
| 324 | 1 |
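# A minimal, self-contained sketch of the HfArgumentParser pattern used in the
# training script above. The ModelArguments dataclass here is illustrative, not
# the script's actual argument set; run as `python sketch.py --output_dir out`.
from dataclasses import dataclass, field
from typing import Optional

from transformers import HfArgumentParser, TrainingArguments


@dataclass
class ModelArguments:
    model_name_or_path: Optional[str] = field(
        default=None, metadata={"help": "Pretrained model name or path."}
    )


parser = HfArgumentParser((ModelArguments, TrainingArguments))
model_args, training_args = parser.parse_args_into_dataclasses()
print(model_args.model_name_or_path, training_args.output_dir)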
"""simple docstring"""
import argparse
import json
import os
import numpy as np
import PIL
import requests
import tensorflow.keras.applications.efficientnet as efficientnet
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from tensorflow.keras.preprocessing import image
from transformers import (
EfficientNetConfig,
EfficientNetForImageClassification,
EfficientNetImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
A : Dict = logging.get_logger(__name__)
model_classes = {
    "b0": efficientnet.EfficientNetB0,
    "b1": efficientnet.EfficientNetB1,
    "b2": efficientnet.EfficientNetB2,
    "b3": efficientnet.EfficientNetB3,
    "b4": efficientnet.EfficientNetB4,
    "b5": efficientnet.EfficientNetB5,
    "b6": efficientnet.EfficientNetB6,
    "b7": efficientnet.EfficientNetB7,
}
CONFIG_MAP = {
"b0": {
"hidden_dim": 1_2_8_0,
"width_coef": 1.0,
"depth_coef": 1.0,
"image_size": 2_2_4,
"dropout_rate": 0.2,
"dw_padding": [],
},
"b1": {
"hidden_dim": 1_2_8_0,
"width_coef": 1.0,
"depth_coef": 1.1,
"image_size": 2_4_0,
"dropout_rate": 0.2,
"dw_padding": [1_6],
},
"b2": {
"hidden_dim": 1_4_0_8,
"width_coef": 1.1,
"depth_coef": 1.2,
"image_size": 2_6_0,
"dropout_rate": 0.3,
"dw_padding": [5, 8, 1_6],
},
"b3": {
"hidden_dim": 1_5_3_6,
"width_coef": 1.2,
"depth_coef": 1.4,
"image_size": 3_0_0,
"dropout_rate": 0.3,
"dw_padding": [5, 1_8],
},
"b4": {
"hidden_dim": 1_7_9_2,
"width_coef": 1.4,
"depth_coef": 1.8,
"image_size": 3_8_0,
"dropout_rate": 0.4,
"dw_padding": [6],
},
"b5": {
"hidden_dim": 2_0_4_8,
"width_coef": 1.6,
"depth_coef": 2.2,
"image_size": 4_5_6,
"dropout_rate": 0.4,
"dw_padding": [1_3, 2_7],
},
"b6": {
"hidden_dim": 2_3_0_4,
"width_coef": 1.8,
"depth_coef": 2.6,
"image_size": 5_2_8,
"dropout_rate": 0.5,
"dw_padding": [3_1],
},
"b7": {
"hidden_dim": 2_5_6_0,
"width_coef": 2.0,
"depth_coef": 3.1,
"image_size": 6_0_0,
"dropout_rate": 0.5,
"dw_padding": [1_8],
},
}
def get_efficientnet_config(model_name ):
    '''simple docstring'''
    config = EfficientNetConfig()
    config.hidden_dim = CONFIG_MAP[model_name]["hidden_dim"]
    config.width_coefficient = CONFIG_MAP[model_name]["width_coef"]
    config.depth_coefficient = CONFIG_MAP[model_name]["depth_coef"]
    config.image_size = CONFIG_MAP[model_name]["image_size"]
    config.dropout_rate = CONFIG_MAP[model_name]["dropout_rate"]
    config.depthwise_padding = CONFIG_MAP[model_name]["dw_padding"]
    repo_id = "huggingface/label-files"
    filename = "imagenet-1k-id2label.json"
    config.num_labels = 1000
    id2label = json.load(open(hf_hub_download(repo_id , filename , repo_type="dataset" ) , "r" ) )
    id2label = {int(k ): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}
    return config
def prepare_img():
    '''simple docstring'''
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url , stream=True ).raw )
    return im
def convert_image_processor(model_name ):
    '''simple docstring'''
    size = CONFIG_MAP[model_name]["image_size"]
    preprocessor = EfficientNetImageProcessor(
        size={"height": size, "width": size} , image_mean=[0.485, 0.456, 0.406] , image_std=[0.47853944, 0.4732864, 0.47434163] , do_center_crop=False , )
    return preprocessor
def rename_keys(original_param_names ):
    '''simple docstring'''
    block_names = [v.split("_" )[0].split("block" )[1] for v in original_param_names if v.startswith("block" )]
    block_names = sorted(set(block_names ) )
    num_blocks = len(block_names )
    block_name_mapping = {b: str(i ) for b, i in zip(block_names , range(num_blocks ) )}
    rename_keys = []
rename_keys.append(("stem_conv/kernel:0", "embeddings.convolution.weight") )
rename_keys.append(("stem_bn/gamma:0", "embeddings.batchnorm.weight") )
rename_keys.append(("stem_bn/beta:0", "embeddings.batchnorm.bias") )
rename_keys.append(("stem_bn/moving_mean:0", "embeddings.batchnorm.running_mean") )
rename_keys.append(("stem_bn/moving_variance:0", "embeddings.batchnorm.running_var") )
for b in block_names:
        hf_b = block_name_mapping[b]
rename_keys.append((f"block{b}_expand_conv/kernel:0", f"encoder.blocks.{hf_b}.expansion.expand_conv.weight") )
rename_keys.append((f"block{b}_expand_bn/gamma:0", f"encoder.blocks.{hf_b}.expansion.expand_bn.weight") )
rename_keys.append((f"block{b}_expand_bn/beta:0", f"encoder.blocks.{hf_b}.expansion.expand_bn.bias") )
rename_keys.append(
(f"block{b}_expand_bn/moving_mean:0", f"encoder.blocks.{hf_b}.expansion.expand_bn.running_mean") )
rename_keys.append(
(f"block{b}_expand_bn/moving_variance:0", f"encoder.blocks.{hf_b}.expansion.expand_bn.running_var") )
rename_keys.append(
(f"block{b}_dwconv/depthwise_kernel:0", f"encoder.blocks.{hf_b}.depthwise_conv.depthwise_conv.weight") )
rename_keys.append((f"block{b}_bn/gamma:0", f"encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.weight") )
rename_keys.append((f"block{b}_bn/beta:0", f"encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.bias") )
rename_keys.append(
(f"block{b}_bn/moving_mean:0", f"encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.running_mean") )
rename_keys.append(
(f"block{b}_bn/moving_variance:0", f"encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.running_var") )
rename_keys.append((f"block{b}_se_reduce/kernel:0", f"encoder.blocks.{hf_b}.squeeze_excite.reduce.weight") )
rename_keys.append((f"block{b}_se_reduce/bias:0", f"encoder.blocks.{hf_b}.squeeze_excite.reduce.bias") )
rename_keys.append((f"block{b}_se_expand/kernel:0", f"encoder.blocks.{hf_b}.squeeze_excite.expand.weight") )
rename_keys.append((f"block{b}_se_expand/bias:0", f"encoder.blocks.{hf_b}.squeeze_excite.expand.bias") )
rename_keys.append(
(f"block{b}_project_conv/kernel:0", f"encoder.blocks.{hf_b}.projection.project_conv.weight") )
rename_keys.append((f"block{b}_project_bn/gamma:0", f"encoder.blocks.{hf_b}.projection.project_bn.weight") )
rename_keys.append((f"block{b}_project_bn/beta:0", f"encoder.blocks.{hf_b}.projection.project_bn.bias") )
rename_keys.append(
(f"block{b}_project_bn/moving_mean:0", f"encoder.blocks.{hf_b}.projection.project_bn.running_mean") )
rename_keys.append(
(f"block{b}_project_bn/moving_variance:0", f"encoder.blocks.{hf_b}.projection.project_bn.running_var") )
rename_keys.append(("top_conv/kernel:0", "encoder.top_conv.weight") )
rename_keys.append(("top_bn/gamma:0", "encoder.top_bn.weight") )
rename_keys.append(("top_bn/beta:0", "encoder.top_bn.bias") )
rename_keys.append(("top_bn/moving_mean:0", "encoder.top_bn.running_mean") )
rename_keys.append(("top_bn/moving_variance:0", "encoder.top_bn.running_var") )
    key_mapping = {}
    for item in rename_keys:
        if item[0] in original_param_names:
            key_mapping[item[0]] = "efficientnet." + item[1]
    key_mapping["predictions/kernel:0"] = "classifier.weight"
    key_mapping["predictions/bias:0"] = "classifier.bias"
return key_mapping
def replace_params(hf_params , tf_params , key_mapping ):
    '''simple docstring'''
    for key, value in tf_params.items():
        if "normalization" in key:
            continue
        hf_key = key_mapping[key]
        if "_conv" in key and "kernel" in key:
            new_hf_value = torch.from_numpy(value ).permute(3 , 2 , 0 , 1 )
        elif "depthwise_kernel" in key:
            new_hf_value = torch.from_numpy(value ).permute(2 , 3 , 0 , 1 )
        elif "kernel" in key:
            new_hf_value = torch.from_numpy(np.transpose(value ) )
        else:
            new_hf_value = torch.from_numpy(value )
        # Replace HF parameters with original TF model parameters
        assert hf_params[hf_key].shape == new_hf_value.shape
        hf_params[hf_key].copy_(new_hf_value )
@torch.no_grad()
def convert_efficientnet_checkpoint(model_name , pytorch_dump_folder_path , save_model , push_to_hub ):
    '''simple docstring'''
    original_model = model_classes[model_name](
        include_top=True , weights="imagenet" , input_tensor=None , input_shape=None , pooling=None , classes=1000 , classifier_activation="softmax" , )
    tf_params = original_model.trainable_variables
    tf_non_train_params = original_model.non_trainable_variables
    tf_params = {param.name: param.numpy() for param in tf_params}
    for param in tf_non_train_params:
        tf_params[param.name] = param.numpy()
    tf_param_names = list(tf_params.keys() )
    # Load HuggingFace model
    config = get_efficientnet_config(model_name )
    hf_model = EfficientNetForImageClassification(config ).eval()
    hf_params = hf_model.state_dict()
    # Create src-to-dst parameter name mapping dictionary
    print("Converting parameters..." )
    key_mapping = rename_keys(tf_param_names )
    replace_params(hf_params , tf_params , key_mapping )
    # Initialize preprocessor and preprocess input image
    preprocessor = convert_image_processor(model_name )
    inputs = preprocessor(images=prepare_img() , return_tensors="pt" )
    # HF model inference
    hf_model.eval()
    with torch.no_grad():
        outputs = hf_model(**inputs )
    hf_logits = outputs.logits.detach().numpy()
    # Original model inference
    original_model.trainable = False
    image_size = CONFIG_MAP[model_name]["image_size"]
    img = prepare_img().resize((image_size, image_size) , resample=PIL.Image.NEAREST )
    x = image.img_to_array(img )
    x = np.expand_dims(x , axis=0 )
    original_logits = original_model.predict(x )
    # Check whether original and HF model outputs match -> np.allclose
    assert np.allclose(original_logits , hf_logits , atol=1e-3 ), "The predicted logits are not the same."
print("Model outputs match!" )
    if save_model:
        # Create folder to save model
        if not os.path.isdir(pytorch_dump_folder_path ):
            os.mkdir(pytorch_dump_folder_path )
        # Save converted model and image processor
        hf_model.save_pretrained(pytorch_dump_folder_path )
        preprocessor.save_pretrained(pytorch_dump_folder_path )
    if push_to_hub:
        # Push model and image processor to hub
        print(f"Pushing converted {model_name} to the hub..." )
        model_name = f"efficientnet-{model_name}"
        preprocessor.push_to_hub(model_name )
        hf_model.push_to_hub(model_name )
if __name__ == "__main__":
A : str = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--model_name",
default="b0",
type=str,
help="Version name of the EfficientNet model you want to convert, select from [b0, b1, b2, b3, b4, b5, b6, b7].",
)
parser.add_argument(
"--pytorch_dump_folder_path",
default="hf_model",
type=str,
help="Path to the output PyTorch model directory.",
)
parser.add_argument("--save_model", action="store_true", help="Save model to local")
parser.add_argument("--push_to_hub", action="store_true", help="Push model and image processor to the hub")
A : Any = parser.parse_args()
convert_efficientnet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.save_model, args.push_to_hub)
| 57 |
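# A minimal sketch of the kernel-layout conversion that replace_params above
# performs. TF Conv2D kernels are (H, W, in, out) while PyTorch expects
# (out, in, H, W); depthwise kernels go (H, W, channels, multiplier) ->
# (channels, multiplier, H, W). Shapes below are illustrative.
import numpy as np
import torch

tf_kernel = np.random.rand(3, 3, 16, 32).astype(np.float32)  # (H, W, in, out)
pt_kernel = torch.from_numpy(tf_kernel).permute(3, 2, 0, 1)  # (out, in, H, W)
assert pt_kernel.shape == (32, 16, 3, 3)

tf_dw = np.random.rand(3, 3, 16, 1).astype(np.float32)       # depthwise kernel
pt_dw = torch.from_numpy(tf_dw).permute(2, 3, 0, 1)          # (channels, 1, H, W)
assert pt_dw.shape == (16, 1, 3, 3)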
'''simple docstring'''
from __future__ import annotations
import unittest
from transformers import XGLMConfig, XGLMTokenizer, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers.models.xglm.modeling_tf_xglm import (
TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXGLMForCausalLM,
TFXGLMModel,
)
@require_tf
class TFXGLMModelTester:
lowercase : Union[str, Any] =XGLMConfig
lowercase : Optional[Any] ={}
lowercase : Optional[int] ='gelu'
def __init__( self, lowerCAmelCase, lowerCAmelCase=14, lowerCAmelCase=7, lowerCAmelCase=True, lowerCAmelCase=True, lowerCAmelCase=True, lowerCAmelCase=99, lowerCAmelCase=32, lowerCAmelCase=2, lowerCAmelCase=4, lowerCAmelCase=37, lowerCAmelCase="gelu", lowerCAmelCase=0.1, lowerCAmelCase=0.1, lowerCAmelCase=512, lowerCAmelCase=0.0_2, ):
"""simple docstring"""
lowerCamelCase_ =parent
lowerCamelCase_ =batch_size
lowerCamelCase_ =seq_length
lowerCamelCase_ =is_training
lowerCamelCase_ =use_input_mask
lowerCamelCase_ =use_labels
lowerCamelCase_ =vocab_size
lowerCamelCase_ =d_model
lowerCamelCase_ =num_hidden_layers
lowerCamelCase_ =num_attention_heads
lowerCamelCase_ =ffn_dim
lowerCamelCase_ =activation_function
lowerCamelCase_ =activation_dropout
lowerCamelCase_ =attention_dropout
lowerCamelCase_ =max_position_embeddings
lowerCamelCase_ =initializer_range
lowerCamelCase_ =None
lowerCamelCase_ =0
lowerCamelCase_ =2
lowerCamelCase_ =1
def lowercase__ ( self ):
"""simple docstring"""
return XGLMConfig.from_pretrained('''facebook/xglm-564M''' )
def lowercase__ ( self ):
"""simple docstring"""
lowerCamelCase_ =tf.clip_by_value(
ids_tensor([self.batch_size, self.seq_length], self.vocab_size ), clip_value_min=0, clip_value_max=3 )
lowerCamelCase_ =None
if self.use_input_mask:
lowerCamelCase_ =random_attention_mask([self.batch_size, self.seq_length] )
lowerCamelCase_ =self.get_config()
lowerCamelCase_ =floats_tensor([self.num_hidden_layers, self.num_attention_heads], 2 )
return (
config,
input_ids,
input_mask,
head_mask,
)
def lowercase__ ( self ):
"""simple docstring"""
return XGLMConfig(
vocab_size=self.vocab_size, d_model=self.hidden_size, num_layers=self.num_hidden_layers, attention_heads=self.num_attention_heads, ffn_dim=self.ffn_dim, activation_function=self.activation_function, activation_dropout=self.activation_dropout, attention_dropout=self.attention_dropout, max_position_embeddings=self.max_position_embeddings, initializer_range=self.initializer_range, use_cache=lowerCAmelCase, bos_token_id=self.bos_token_id, eos_token_id=self.eos_token_id, pad_token_id=self.pad_token_id, return_dict=lowerCAmelCase, )
def lowercase__ ( self ):
"""simple docstring"""
lowerCamelCase_ =self.prepare_config_and_inputs()
(
(
lowerCamelCase_
), (
lowerCamelCase_
), (
lowerCamelCase_
), (
lowerCamelCase_
),
) =config_and_inputs
lowerCamelCase_ ={
'''input_ids''': input_ids,
'''head_mask''': head_mask,
}
return config, inputs_dict
@require_tf
class __UpperCamelCase ( lowerCamelCase__ , lowerCamelCase__ , unittest.TestCase ):
lowercase : int =(TFXGLMModel, TFXGLMForCausalLM) if is_tf_available() else ()
lowercase : Optional[Any] =(TFXGLMForCausalLM,) if is_tf_available() else ()
lowercase : Tuple =(
{'feature-extraction': TFXGLMModel, 'text-generation': TFXGLMForCausalLM} if is_tf_available() else {}
)
lowercase : Optional[Any] =False
lowercase : Optional[Any] =False
lowercase : Optional[int] =False
def lowercase__ ( self ):
"""simple docstring"""
lowerCamelCase_ =TFXGLMModelTester(self )
lowerCamelCase_ =ConfigTester(self, config_class=lowerCAmelCase, n_embd=37 )
def lowercase__ ( self ):
"""simple docstring"""
self.config_tester.run_common_tests()
@slow
def lowercase__ ( self ):
"""simple docstring"""
for model_name in TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowerCamelCase_ =TFXGLMModel.from_pretrained(lowerCAmelCase )
self.assertIsNotNone(lowerCAmelCase )
@unittest.skip(reason='''Currently, model embeddings are going to undergo a major refactor.''' )
def lowercase__ ( self ):
"""simple docstring"""
super().test_resize_token_embeddings()
@require_tf
class __UpperCamelCase ( unittest.TestCase ):
@slow
def lowercase__ ( self, lowerCAmelCase=True ):
"""simple docstring"""
lowerCamelCase_ =TFXGLMForCausalLM.from_pretrained('''facebook/xglm-564M''' )
        lowerCamelCase_ =tf.convert_to_tensor([[2, 268, 9_865]], dtype=tf.int32 ) # The dog
# </s> The dog is a very friendly dog. He is very affectionate and loves to play with other
# fmt: off
lowerCamelCase_ =[2, 268, 9_865, 67, 11, 1_988, 57_252, 9_865, 5, 984, 67, 1_988, 213_838, 1_658, 53, 70_446, 33, 6_657, 278, 1_581]
# fmt: on
lowerCamelCase_ =model.generate(lowerCAmelCase, do_sample=lowerCAmelCase, num_beams=1 )
if verify_outputs:
self.assertListEqual(output_ids[0].numpy().tolist(), lowerCAmelCase )
@slow
def lowercase__ ( self ):
"""simple docstring"""
lowerCamelCase_ =XGLMTokenizer.from_pretrained('''facebook/xglm-564M''' )
lowerCamelCase_ =TFXGLMForCausalLM.from_pretrained('''facebook/xglm-564M''' )
tf.random.set_seed(0 )
lowerCamelCase_ =tokenizer('''Today is a nice day and''', return_tensors='''tf''' )
lowerCamelCase_ =tokenized.input_ids
        # forces the generation to happen on CPU, to avoid GPU-related quirks (and ensure the same output regardless of the available devices)
with tf.device(''':/CPU:0''' ):
lowerCamelCase_ =model.generate(lowerCAmelCase, do_sample=lowerCAmelCase, seed=[7, 0] )
lowerCamelCase_ =tokenizer.decode(output_ids[0], skip_special_tokens=lowerCAmelCase )
lowerCamelCase_ =(
'''Today is a nice day and warm evening here over Southern Alberta!! Today when they closed schools due'''
)
self.assertEqual(lowerCAmelCase, lowerCAmelCase )
@slow
def lowercase__ ( self ):
"""simple docstring"""
lowerCamelCase_ =TFXGLMForCausalLM.from_pretrained('''facebook/xglm-564M''' )
lowerCamelCase_ =XGLMTokenizer.from_pretrained('''facebook/xglm-564M''' )
lowerCamelCase_ ='''left'''
# use different length sentences to test batching
lowerCamelCase_ =[
'''This is an extremelly long sentence that only exists to test the ability of the model to cope with '''
'''left-padding, such as in batched generation. The output for the sequence below should be the same '''
'''regardless of whether left padding is applied or not. When''',
'''Hello, my dog is a little''',
]
lowerCamelCase_ =tokenizer(lowerCAmelCase, return_tensors='''tf''', padding=lowerCAmelCase )
lowerCamelCase_ =inputs['''input_ids''']
lowerCamelCase_ =model.generate(input_ids=lowerCAmelCase, attention_mask=inputs['''attention_mask'''], max_new_tokens=12 )
lowerCamelCase_ =tokenizer(sentences[0], return_tensors='''tf''' ).input_ids
lowerCamelCase_ =model.generate(input_ids=lowerCAmelCase, max_new_tokens=12 )
lowerCamelCase_ =tokenizer(sentences[1], return_tensors='''tf''' ).input_ids
lowerCamelCase_ =model.generate(input_ids=lowerCAmelCase, max_new_tokens=12 )
lowerCamelCase_ =tokenizer.batch_decode(lowerCAmelCase, skip_special_tokens=lowerCAmelCase )
lowerCamelCase_ =tokenizer.decode(output_non_padded[0], skip_special_tokens=lowerCAmelCase )
lowerCamelCase_ =tokenizer.decode(output_padded[0], skip_special_tokens=lowerCAmelCase )
lowerCamelCase_ =[
'''This is an extremelly long sentence that only exists to test the ability of the model to cope with '''
'''left-padding, such as in batched generation. The output for the sequence below should be the same '''
'''regardless of whether left padding is applied or not. When left padding is applied, the sequence will be '''
'''a single''',
'''Hello, my dog is a little bit of a shy one, but he is very friendly''',
]
self.assertListEqual(lowerCAmelCase, lowerCAmelCase )
self.assertListEqual(lowerCAmelCase, [non_padded_sentence, padded_sentence] )
| 75 | 0 |
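# A minimal sketch of the left-padding setup the batched-generation test above
# relies on: decoder-only models must be padded on the left so the final
# position of every row holds a real token instead of padding. The sentences
# below are illustrative.
from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("facebook/xglm-564M")
tokenizer.padding_side = "left"

batch = tokenizer(
    ["A much longer first sentence for the batch", "Hello"],
    return_tensors="tf",
    padding=True,
)
# The attention_mask zeroes out the left pads so generate() can ignore them.
print(batch["input_ids"].shape, batch["attention_mask"][1])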
import multiprocessing
import os
from typing import BinaryIO, Optional, Union
import fsspec
from .. import Dataset, Features, NamedSplit, config
from ..formatting import query_table
from ..packaged_modules.json.json import Json
from ..utils import logging
from ..utils.typing import NestedDataStructureLike, PathLike
from .abc import AbstractDatasetReader
class JsonDatasetReader(AbstractDatasetReader ):
    def __init__( self , path_or_paths , split = None , features = None , cache_dir = None , keep_in_memory = False , streaming = False , field = None , num_proc = None , **kwargs , ) -> None:
        """simple docstring"""
        super().__init__(
            path_or_paths , split=split , features=features , cache_dir=cache_dir , keep_in_memory=keep_in_memory , streaming=streaming , num_proc=num_proc , **kwargs , )
        self.field = field
        path_or_paths = path_or_paths if isinstance(path_or_paths , dict ) else {self.split: path_or_paths}
        self.builder = Json(
            cache_dir=cache_dir , data_files=path_or_paths , features=features , field=field , **kwargs , )
    def read( self ):
        """simple docstring"""
        if self.streaming:
            dataset = self.builder.as_streaming_dataset(split=self.split )
        # Build regular (map-style) dataset
        else:
            download_config = None
            download_mode = None
            verification_mode = None
            base_path = None
            self.builder.download_and_prepare(
                download_config=download_config , download_mode=download_mode , verification_mode=verification_mode , base_path=base_path , num_proc=self.num_proc , )
            dataset = self.builder.as_dataset(
                split=self.split , verification_mode=verification_mode , in_memory=self.keep_in_memory )
        return dataset
class JsonDatasetWriter:
    def __init__( self , dataset , path_or_buf , batch_size = None , num_proc = None , **to_json_kwargs , ) -> None:
        """simple docstring"""
        if num_proc is not None and num_proc <= 0:
            raise ValueError(f'''num_proc {num_proc} must be an integer > 0.''' )
        self.dataset = dataset
        self.path_or_buf = path_or_buf
        self.batch_size = batch_size if batch_size else config.DEFAULT_MAX_BATCH_SIZE
        self.num_proc = num_proc
        self.encoding = 'utf-8'
        self.to_json_kwargs = to_json_kwargs
    def write( self ):
        """simple docstring"""
        _ = self.to_json_kwargs.pop('path_or_buf' , None )
        orient = self.to_json_kwargs.pop('orient' , 'records' )
        lines = self.to_json_kwargs.pop('lines' , True if orient == 'records' else False )
        index = self.to_json_kwargs.pop('index' , False if orient in ['split', 'table'] else True )
        compression = self.to_json_kwargs.pop('compression' , None )
        if compression not in [None, "infer", "gzip", "bz2", "xz"]:
            raise NotImplementedError(f'''`datasets` currently does not support {compression} compression''' )
        if isinstance(self.path_or_buf , (str, bytes, os.PathLike) ):
            with fsspec.open(self.path_or_buf , 'wb' , compression=compression ) as buffer:
                written = self._write(file_obj=buffer , orient=orient , lines=lines , index=index , **self.to_json_kwargs )
        else:
            if compression:
                raise NotImplementedError(
                    f'''The compression parameter is not supported when writing to a buffer, but compression={compression}'''
                    ' was passed. Please provide a local path instead.' )
            written = self._write(
                file_obj=self.path_or_buf , orient=orient , lines=lines , index=index , **self.to_json_kwargs )
        return written
    def _batch_json( self , args ):
        """simple docstring"""
        offset, orient, lines, index, to_json_kwargs = args
        batch = query_table(
            table=self.dataset.data , key=slice(offset , offset + self.batch_size ) , indices=self.dataset._indices , )
        json_str = batch.to_pandas().to_json(
            path_or_buf=None , orient=orient , lines=lines , index=index , **to_json_kwargs )
        if not json_str.endswith('\n' ):
            json_str += "\n"
        return json_str.encode(self.encoding )
    def _write( self , file_obj , orient , lines , index , **to_json_kwargs , ):
        """simple docstring"""
        written = 0
        if self.num_proc is None or self.num_proc == 1:
            for offset in logging.tqdm(
                range(0 , len(self.dataset ) , self.batch_size ) , unit='ba' , disable=not logging.is_progress_bar_enabled() , desc='Creating json from Arrow format' , ):
                json_str = self._batch_json((offset, orient, lines, index, to_json_kwargs) )
                written += file_obj.write(json_str )
        else:
            num_rows , batch_size = len(self.dataset ), self.batch_size
            with multiprocessing.Pool(self.num_proc ) as pool:
                for json_str in logging.tqdm(
                    pool.imap(
                        self._batch_json , [(offset, orient, lines, index, to_json_kwargs) for offset in range(0 , num_rows , batch_size )] , ) , total=(num_rows // batch_size) + 1 if num_rows % batch_size else num_rows // batch_size , unit='ba' , disable=not logging.is_progress_bar_enabled() , desc='Creating json from Arrow format' , ):
                    written += file_obj.write(json_str )
        return written
| 363 |
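# The JsonDatasetWriter above is what backs Dataset.to_json; a minimal usage
# sketch with the defaults it resolves (orient="records", lines=True):
from datasets import Dataset

ds = Dataset.from_dict({"id": [1, 2], "text": ["foo", "bar"]})
ds.to_json("out.jsonl")  # writes one JSON object per line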
import argparse
import pytorch_lightning as pl
import torch
from torch import nn
from transformers import LongformerForQuestionAnswering, LongformerModel
class LightningModel(pl.LightningModule ):
    def __init__( self , model ):
        """simple docstring"""
        super().__init__()
        self.model = model
        self.num_labels = 2
        self.qa_outputs = nn.Linear(self.model.config.hidden_size , self.num_labels )
    def forward( self ):
        """simple docstring"""
pass
def convert_longformer_qa_checkpoint_to_pytorch( longformer_model: str , longformer_question_answering_ckpt_path: str , pytorch_dump_folder_path: str ) -> None:
    '''simple docstring'''
    longformer = LongformerModel.from_pretrained(longformer_model )
    lightning_model = LightningModel(longformer )
    ckpt = torch.load(longformer_question_answering_ckpt_path , map_location=torch.device('cpu' ) )
    lightning_model.load_state_dict(ckpt['state_dict'] )
    # init longformer question answering model
    longformer_for_qa = LongformerForQuestionAnswering.from_pretrained(longformer_model )
# transfer weights
longformer_for_qa.longformer.load_state_dict(lightning_model.model.state_dict() )
longformer_for_qa.qa_outputs.load_state_dict(lightning_model.qa_outputs.state_dict() )
longformer_for_qa.eval()
# save model
longformer_for_qa.save_pretrained(a__ )
print(F'''Conversion successful. Model saved under {pytorch_dump_folder_path}''' )
if __name__ == "__main__":
lowerCAmelCase__ :Optional[int] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--longformer_model''',
default=None,
type=str,
required=True,
help='''model identifier of longformer. Should be either `longformer-base-4096` or `longformer-large-4096`.''',
)
parser.add_argument(
'''--longformer_question_answering_ckpt_path''',
default=None,
type=str,
required=True,
help='''Path the official PyTorch Lightning Checkpoint.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
lowerCAmelCase__ :Dict = parser.parse_args()
convert_longformer_qa_checkpoint_to_pytorch(
args.longformer_model, args.longformer_question_answering_ckpt_path, args.pytorch_dump_folder_path
)
| 185 | 0 |
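# A minimal sketch of the weight-transfer pattern in the conversion script
# above: copying a submodule's state dict between two modules that share the
# same architecture; strict=True (the default) verifies matching keys/shapes.
import torch.nn as nn

src = nn.Linear(4, 2)
dst = nn.Linear(4, 2)
dst.load_state_dict(src.state_dict())
assert all((a == b).all() for a, b in zip(dst.parameters(), src.parameters()))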
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
'''configuration_canine''': ['''CANINE_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''CanineConfig'''],
'''tokenization_canine''': ['''CanineTokenizer'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_canine"] = [
'''CANINE_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''CanineForMultipleChoice''',
'''CanineForQuestionAnswering''',
'''CanineForSequenceClassification''',
'''CanineForTokenClassification''',
'''CanineLayer''',
'''CanineModel''',
'''CaninePreTrainedModel''',
'''load_tf_weights_in_canine''',
]
if TYPE_CHECKING:
from .configuration_canine import CANINE_PRETRAINED_CONFIG_ARCHIVE_MAP, CanineConfig
from .tokenization_canine import CanineTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_canine import (
CANINE_PRETRAINED_MODEL_ARCHIVE_LIST,
CanineForMultipleChoice,
CanineForQuestionAnswering,
CanineForSequenceClassification,
CanineForTokenClassification,
CanineLayer,
CanineModel,
CaninePreTrainedModel,
load_tf_weights_in_canine,
)
else:
import sys
snake_case_ = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 78 |
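# A minimal sketch of what the _LazyModule registration above enables: the
# heavy modeling submodules are imported only when an attribute is first
# accessed, so `import transformers` stays cheap.
import transformers

# This attribute access triggers the lazy import of the canine config module.
cfg_cls = transformers.CanineConfig
print(cfg_cls.__module__)  # transformers.models.canine.configuration_canine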
'''simple docstring'''
import argparse
import json
from tqdm import tqdm
def main():
"""simple docstring"""
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--src_path""" , type=lowerCAmelCase , default="""biencoder-nq-dev.json""" , help="""Path to raw DPR training data""" , )
parser.add_argument(
"""--evaluation_set""" , type=lowerCAmelCase , help="""where to store parsed evaluation_set file""" , )
parser.add_argument(
"""--gold_data_path""" , type=lowerCAmelCase , help="""where to store parsed gold_data_path file""" , )
    args = parser.parse_args()
with open(args.src_path , """r""" ) as src_file, open(args.evaluation_set , """w""" ) as eval_file, open(
args.gold_data_path , """w""" ) as gold_file:
        dpr_records = json.load(src_file )
        for dpr_record in tqdm(dpr_records ):
            question = dpr_record["""question"""]
            titles = [context["""title"""] for context in dpr_record["""positive_ctxs"""]]
            eval_file.write(question + """\n""" )
            gold_file.write("""\t""".join(titles ) + """\n""" )
if __name__ == "__main__":
main()
| 70 | 0 |
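# Illustrative shape of one biencoder record consumed by the parser above;
# the key names come from the code, the values here are made up.
dpr_record = {
    "question": "who wrote the declaration of independence",
    "positive_ctxs": [{"title": "Thomas Jefferson"}, {"title": "Declaration of Independence"}],
}
question = dpr_record["question"]
titles = [context["title"] for context in dpr_record["positive_ctxs"]]
print(question)           # one line per question in the evaluation set
print("\t".join(titles))  # one tab-joined line of gold page titles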
'''simple docstring'''
import gc
import random
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
import diffusers
from diffusers import (
AutoencoderKL,
EulerDiscreteScheduler,
StableDiffusionLatentUpscalePipeline,
StableDiffusionPipeline,
UNetaDConditionModel,
)
from diffusers.schedulers import KarrasDiffusionSchedulers
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
def check_same_shape(tensor_list ):
    """simple docstring"""
    shapes = [tensor.shape for tensor in tensor_list]
return all(shape == shapes[0] for shape in shapes[1:] )
class _a ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , unittest.TestCase ):
'''simple docstring'''
A : Tuple = StableDiffusionLatentUpscalePipeline
A : List[Any] = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {
'''height''',
'''width''',
'''cross_attention_kwargs''',
'''negative_prompt_embeds''',
'''prompt_embeds''',
}
A : Optional[int] = PipelineTesterMixin.required_optional_params - {'''num_images_per_prompt'''}
A : Optional[Any] = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
A : Any = frozenset(
[] ) # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess
A : Optional[Any] = frozenset([] )
A : Any = True
@property
def UpperCamelCase_ ( self ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Tuple = 1
SCREAMING_SNAKE_CASE : Optional[Any] = 4
SCREAMING_SNAKE_CASE : Tuple = (16, 16)
SCREAMING_SNAKE_CASE : Tuple = floats_tensor((batch_size, num_channels) + sizes, rng=random.Random(0 ) ).to(A )
return image
def UpperCamelCase_ ( self ):
'''simple docstring'''
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE : int = UNetaDConditionModel(
act_fn='gelu', attention_head_dim=8, norm_num_groups=A, block_out_channels=[32, 32, 64, 64], time_cond_proj_dim=160, conv_in_kernel=1, conv_out_kernel=1, cross_attention_dim=32, down_block_types=(
'KDownBlock2D',
'KCrossAttnDownBlock2D',
'KCrossAttnDownBlock2D',
'KCrossAttnDownBlock2D',
), in_channels=8, mid_block_type=A, only_cross_attention=A, out_channels=5, resnet_time_scale_shift='scale_shift', time_embedding_type='fourier', timestep_post_act='gelu', up_block_types=('KCrossAttnUpBlock2D', 'KCrossAttnUpBlock2D', 'KCrossAttnUpBlock2D', 'KUpBlock2D'), )
SCREAMING_SNAKE_CASE : Any = AutoencoderKL(
block_out_channels=[32, 32, 64, 64], in_channels=3, out_channels=3, down_block_types=[
'DownEncoderBlock2D',
'DownEncoderBlock2D',
'DownEncoderBlock2D',
'DownEncoderBlock2D',
], up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D', 'UpDecoderBlock2D', 'UpDecoderBlock2D'], latent_channels=4, )
SCREAMING_SNAKE_CASE : int = EulerDiscreteScheduler(prediction_type='sample' )
SCREAMING_SNAKE_CASE : str = CLIPTextConfig(
bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=37, layer_norm_eps=1E-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1_000, hidden_act='quick_gelu', projection_dim=512, )
SCREAMING_SNAKE_CASE : List[str] = CLIPTextModel(A )
SCREAMING_SNAKE_CASE : List[str] = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' )
SCREAMING_SNAKE_CASE : int = {
'unet': model.eval(),
'vae': vae.eval(),
'scheduler': scheduler,
'text_encoder': text_encoder,
'tokenizer': tokenizer,
}
return components
def UpperCamelCase_ ( self, A, A=0 ):
'''simple docstring'''
if str(A ).startswith('mps' ):
SCREAMING_SNAKE_CASE : str = torch.manual_seed(A )
else:
SCREAMING_SNAKE_CASE : Optional[int] = torch.Generator(device=A ).manual_seed(A )
SCREAMING_SNAKE_CASE : Optional[int] = {
'prompt': 'A painting of a squirrel eating a burger',
'image': self.dummy_image.cpu(),
'generator': generator,
'num_inference_steps': 2,
'output_type': 'numpy',
}
return inputs
def UpperCamelCase_ ( self ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Any = 'cpu'
SCREAMING_SNAKE_CASE : List[Any] = self.get_dummy_components()
SCREAMING_SNAKE_CASE : int = self.pipeline_class(**A )
pipe.to(A )
pipe.set_progress_bar_config(disable=A )
SCREAMING_SNAKE_CASE : Optional[int] = self.get_dummy_inputs(A )
SCREAMING_SNAKE_CASE : Optional[int] = pipe(**A ).images
SCREAMING_SNAKE_CASE : List[str] = image[0, -3:, -3:, -1]
self.assertEqual(image.shape, (1, 256, 256, 3) )
SCREAMING_SNAKE_CASE : List[Any] = np.array(
[0.47_22_24_12, 0.41_92_16_33, 0.44_71_74_34, 0.46_87_41_92, 0.42_58_82_58, 0.46_15_07_26, 0.4_67_75_34, 0.45_58_38_32, 0.48_57_90_55] )
SCREAMING_SNAKE_CASE : Dict = np.abs(image_slice.flatten() - expected_slice ).max()
self.assertLessEqual(A, 1E-3 )
def UpperCamelCase_ ( self ):
'''simple docstring'''
super().test_attention_slicing_forward_pass(expected_max_diff=7E-3 )
def UpperCamelCase_ ( self ):
'''simple docstring'''
super().test_cpu_offload_forward_pass(expected_max_diff=3E-3 )
def UpperCamelCase_ ( self ):
'''simple docstring'''
super().test_dict_tuple_outputs_equivalent(expected_max_difference=3E-3 )
def UpperCamelCase_ ( self ):
'''simple docstring'''
super().test_inference_batch_single_identical(expected_max_diff=7E-3 )
def UpperCamelCase_ ( self ):
'''simple docstring'''
super().test_pt_np_pil_outputs_equivalent(expected_max_diff=3E-3 )
def UpperCamelCase_ ( self ):
'''simple docstring'''
super().test_save_load_local(expected_max_difference=3E-3 )
def UpperCamelCase_ ( self ):
'''simple docstring'''
super().test_save_load_optional_components(expected_max_difference=3E-3 )
def UpperCamelCase_ ( self ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Dict = [
'DDIMScheduler',
'DDPMScheduler',
'PNDMScheduler',
'HeunDiscreteScheduler',
'EulerAncestralDiscreteScheduler',
'KDPM2DiscreteScheduler',
'KDPM2AncestralDiscreteScheduler',
'DPMSolverSDEScheduler',
]
SCREAMING_SNAKE_CASE : List[Any] = self.get_dummy_components()
SCREAMING_SNAKE_CASE : Tuple = self.pipeline_class(**A )
# make sure that PNDM does not need warm-up
pipe.scheduler.register_to_config(skip_prk_steps=A )
pipe.to(A )
pipe.set_progress_bar_config(disable=A )
SCREAMING_SNAKE_CASE : List[Any] = self.get_dummy_inputs(A )
SCREAMING_SNAKE_CASE : Dict = 2
SCREAMING_SNAKE_CASE : int = []
for scheduler_enum in KarrasDiffusionSchedulers:
if scheduler_enum.name in skip_schedulers:
# no sigma schedulers are not supported
# no schedulers
continue
SCREAMING_SNAKE_CASE : str = getattr(A, scheduler_enum.name )
SCREAMING_SNAKE_CASE : Optional[int] = scheduler_cls.from_config(pipe.scheduler.config )
SCREAMING_SNAKE_CASE : str = pipe(**A )[0]
outputs.append(A )
assert check_same_shape(A )
@require_torch_gpu
@slow
class _a ( unittest.TestCase ):
'''simple docstring'''
def UpperCamelCase_ ( self ):
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def UpperCamelCase_ ( self ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Dict = torch.manual_seed(33 )
        SCREAMING_SNAKE_CASE : Any = StableDiffusionPipeline.from_pretrained('CompVis/stable-diffusion-v1-4', torch_dtype=torch.float16 )
pipe.to('cuda' )
SCREAMING_SNAKE_CASE : int = StableDiffusionLatentUpscalePipeline.from_pretrained(
            'stabilityai/sd-x2-latent-upscaler', torch_dtype=torch.float16 )
upscaler.to('cuda' )
SCREAMING_SNAKE_CASE : Any = 'a photo of an astronaut high resolution, unreal engine, ultra realistic'
SCREAMING_SNAKE_CASE : Optional[Any] = pipe(A, generator=A, output_type='latent' ).images
SCREAMING_SNAKE_CASE : Dict = upscaler(
prompt=A, image=A, num_inference_steps=20, guidance_scale=0, generator=A, output_type='np', ).images[0]
SCREAMING_SNAKE_CASE : List[str] = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/latent-upscaler/astronaut_1024.npy' )
assert np.abs((expected_image - image).mean() ) < 5E-2
def UpperCamelCase_ ( self ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : int = torch.manual_seed(33 )
SCREAMING_SNAKE_CASE : Optional[Any] = StableDiffusionLatentUpscalePipeline.from_pretrained(
            'stabilityai/sd-x2-latent-upscaler', torch_dtype=torch.float16 )
upscaler.to('cuda' )
SCREAMING_SNAKE_CASE : Any = 'the temple of fire by Ross Tran and Gerardo Dottori, oil on canvas'
SCREAMING_SNAKE_CASE : Dict = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/latent-upscaler/fire_temple_512.png' )
SCREAMING_SNAKE_CASE : Union[str, Any] = upscaler(
prompt=A, image=A, num_inference_steps=20, guidance_scale=0, generator=A, output_type='np', ).images[0]
SCREAMING_SNAKE_CASE : Optional[Any] = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/latent-upscaler/fire_temple_1024.npy' )
assert np.abs((expected_image - image).max() ) < 5E-2
| 354 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCamelCase_ = logging.get_logger(__name__)
UpperCamelCase_ = {"openai-gpt": "https://huggingface.co/openai-gpt/resolve/main/config.json"}
class _a ( SCREAMING_SNAKE_CASE ):
'''simple docstring'''
A : int = '''openai-gpt'''
A : Dict = {
'''max_position_embeddings''': '''n_positions''',
'''hidden_size''': '''n_embd''',
'''num_attention_heads''': '''n_head''',
'''num_hidden_layers''': '''n_layer''',
}
def __init__( self, A=40_478, A=512, A=768, A=12, A=12, A="gelu", A=0.1, A=0.1, A=0.1, A=1E-5, A=0.02, A="cls_index", A=True, A=None, A=True, A=0.1, **A, ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : int = vocab_size
SCREAMING_SNAKE_CASE : Union[str, Any] = n_positions
SCREAMING_SNAKE_CASE : Tuple = n_embd
SCREAMING_SNAKE_CASE : Any = n_layer
SCREAMING_SNAKE_CASE : Tuple = n_head
SCREAMING_SNAKE_CASE : Union[str, Any] = afn
SCREAMING_SNAKE_CASE : List[str] = resid_pdrop
SCREAMING_SNAKE_CASE : List[str] = embd_pdrop
SCREAMING_SNAKE_CASE : Tuple = attn_pdrop
SCREAMING_SNAKE_CASE : List[str] = layer_norm_epsilon
SCREAMING_SNAKE_CASE : Any = initializer_range
SCREAMING_SNAKE_CASE : List[Any] = summary_type
SCREAMING_SNAKE_CASE : int = summary_use_proj
SCREAMING_SNAKE_CASE : Union[str, Any] = summary_activation
SCREAMING_SNAKE_CASE : List[str] = summary_first_dropout
SCREAMING_SNAKE_CASE : Any = summary_proj_to_labels
super().__init__(**A )
| 246 | 0 |
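# A minimal sketch of the attribute_map in the config class above: canonical
# names such as hidden_size are transparently aliased onto the GPT-specific
# n_embd, so generic code can read either attribute.
from transformers import OpenAIGPTConfig

cfg = OpenAIGPTConfig()
assert cfg.hidden_size == cfg.n_embd == 768
assert cfg.num_hidden_layers == cfg.n_layer == 12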
'''simple docstring'''
import coval # From: git+https://github.com/ns-moosavi/coval.git # noqa: F401
from coval.conll import reader, util
from coval.eval import evaluator
import datasets
lowercase__ = datasets.logging.get_logger(__name__)
lowercase__ = """\
@InProceedings{moosavi2019minimum,
author = { Nafise Sadat Moosavi, Leo Born, Massimo Poesio and Michael Strube},
title = {Using Automatically Extracted Minimum Spans to Disentangle Coreference Evaluation from Boundary Detection},
year = {2019},
booktitle = {Proceedings of the 57th Annual Meeting of
the Association for Computational Linguistics (Volume 1: Long Papers)},
publisher = {Association for Computational Linguistics},
address = {Florence, Italy},
}
@inproceedings{10.3115/1072399.1072405,
author = {Vilain, Marc and Burger, John and Aberdeen, John and Connolly, Dennis and Hirschman, Lynette},
title = {A Model-Theoretic Coreference Scoring Scheme},
year = {1995},
isbn = {1558604022},
publisher = {Association for Computational Linguistics},
address = {USA},
url = {https://doi.org/10.3115/1072399.1072405},
doi = {10.3115/1072399.1072405},
booktitle = {Proceedings of the 6th Conference on Message Understanding},
pages = {45–52},
numpages = {8},
location = {Columbia, Maryland},
series = {MUC6 ’95}
}
@INPROCEEDINGS{Bagga98algorithmsfor,
author = {Amit Bagga and Breck Baldwin},
title = {Algorithms for Scoring Coreference Chains},
booktitle = {In The First International Conference on Language Resources and Evaluation Workshop on Linguistics Coreference},
year = {1998},
pages = {563--566}
}
@INPROCEEDINGS{Luo05oncoreference,
author = {Xiaoqiang Luo},
title = {On coreference resolution performance metrics},
booktitle = {In Proc. of HLT/EMNLP},
year = {2005},
pages = {25--32},
publisher = {URL}
}
@inproceedings{moosavi-strube-2016-coreference,
title = \"Which Coreference Evaluation Metric Do You Trust? A Proposal for a Link-based Entity Aware Metric\",
author = \"Moosavi, Nafise Sadat and
Strube, Michael\",
booktitle = \"Proceedings of the 54th Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers)\",
month = aug,
year = \"2016\",
address = \"Berlin, Germany\",
publisher = \"Association for Computational Linguistics\",
url = \"https://www.aclweb.org/anthology/P16-1060\",
doi = \"10.18653/v1/P16-1060\",
pages = \"632--642\",
}
"""
lowercase__ = """\
CoVal is a coreference evaluation tool for the CoNLL and ARRAU datasets which
implements of the common evaluation metrics including MUC [Vilain et al, 1995],
B-cubed [Bagga and Baldwin, 1998], CEAFe [Luo et al., 2005],
LEA [Moosavi and Strube, 2016] and the averaged CoNLL score
(the average of the F1 values of MUC, B-cubed and CEAFe)
[Denis and Baldridge, 2009a; Pradhan et al., 2011].
This wrapper of CoVal currently only work with CoNLL line format:
The CoNLL format has one word per line with all the annotation for this word in column separated by spaces:
Column Type Description
1 Document ID This is a variation on the document filename
2 Part number Some files are divided into multiple parts numbered as 000, 001, 002, ... etc.
3 Word number
4 Word itself This is the token as segmented/tokenized in the Treebank. Initially the *_skel file contain the placeholder [WORD] which gets replaced by the actual token from the Treebank which is part of the OntoNotes release.
5 Part-of-Speech
6 Parse bit This is the bracketed structure broken before the first open parenthesis in the parse, and the word/part-of-speech leaf replaced with a *. The full parse can be created by substituting the asterisk with the \"([pos] [word])\" string (or leaf) and concatenating the items in the rows of that column.
7 Predicate lemma The predicate lemma is mentioned for the rows for which we have semantic role information. All other rows are marked with a \"-\"
8 Predicate Frameset ID This is the PropBank frameset ID of the predicate in Column 7.
9 Word sense This is the word sense of the word in Column 3.
10 Speaker/Author This is the speaker or author name where available. Mostly in Broadcast Conversation and Web Log data.
11 Named Entities These columns identifies the spans representing various named entities.
12:N Predicate Arguments There is one column each of predicate argument structure information for the predicate mentioned in Column 7.
N Coreference Coreference chain information encoded in a parenthesis structure.
More informations on the format can be found here (section \"*_conll File Format\"): http://www.conll.cemantix.org/2012/data.html
Details on the evaluation on CoNLL can be found here: https://github.com/ns-moosavi/coval/blob/master/conll/README.md
CoVal code was written by @ns-moosavi.
Some parts are borrowed from https://github.com/clarkkev/deep-coref/blob/master/evaluation.py
The test suite is taken from https://github.com/conll/reference-coreference-scorers/
Mention evaluation and the test suite are added by @andreasvc.
Parsing CoNLL files is developed by Leo Born.
"""
lowercase__ = """
Calculates coreference evaluation metrics.
Args:
predictions: list of sentences. Each sentence is a list of word predictions to score in the CoNLL format.
Each prediction is a word with its annotations as a string made of columns joined with spaces.
Only columns 4, 5, 6 and the last column are used (word, POS, Pars and coreference annotation)
See the details on the format in the description of the metric.
references: list of sentences. Each sentence is a list of word reference to score in the CoNLL format.
Each reference is a word with its annotations as a string made of columns joined with spaces.
Only columns 4, 5, 6 and the last column are used (word, POS, Pars and coreference annotation)
See the details on the format in the description of the metric.
keep_singletons: After extracting all mentions of key or system files,
mentions whose corresponding coreference chain is of size one,
are considered as singletons. The default evaluation mode will include
singletons in evaluations if they are included in the key or the system files.
By setting 'keep_singletons=False', all singletons in the key and system files
will be excluded from the evaluation.
NP_only: Most of the recent coreference resolvers only resolve NP mentions and
leave out the resolution of VPs. By setting the 'NP_only' option, the scorer will only evaluate the resolution of NPs.
min_span: By setting 'min_span', the scorer reports the results based on automatically detected minimum spans.
Minimum spans are determined using the MINA algorithm.
Returns:
'mentions': mentions
'muc': MUC metric [Vilain et al, 1995]
'bcub': B-cubed [Bagga and Baldwin, 1998]
'ceafe': CEAFe [Luo et al., 2005]
'lea': LEA [Moosavi and Strube, 2016]
'conll_score': averaged CoNLL score (the average of the F1 values of MUC, B-cubed and CEAFe)
Examples:
>>> coval = datasets.load_metric('coval')
>>> words = ['bc/cctv/00/cctv_0005 0 0 Thank VBP (TOP(S(VP* thank 01 1 Xu_li * (V*) * -',
... 'bc/cctv/00/cctv_0005 0 1 you PRP (NP*) - - - Xu_li * (ARG1*) (ARG0*) (116)',
... 'bc/cctv/00/cctv_0005 0 2 everyone NN (NP*) - - - Xu_li * (ARGM-DIS*) * (116)',
... 'bc/cctv/00/cctv_0005 0 3 for IN (PP* - - - Xu_li * (ARG2* * -',
... 'bc/cctv/00/cctv_0005 0 4 watching VBG (S(VP*)))) watch 01 1 Xu_li * *) (V*) -',
... 'bc/cctv/00/cctv_0005 0 5 . . *)) - - - Xu_li * * * -']
>>> references = [words]
>>> predictions = [words]
>>> results = coval.compute(predictions=predictions, references=references)
>>> print(results) # doctest:+ELLIPSIS
{'mentions/recall': 1.0,[...] 'conll_score': 100.0}
"""
def get_coref_infos( key_lines , sys_lines , NP_only=False , remove_nested=False , keep_singletons=True , min_span=False , doc="dummy_doc" ):
UpperCAmelCase : Any = {doc: key_lines}
UpperCAmelCase : str = {doc: sys_lines}
UpperCAmelCase : Any = {}
UpperCAmelCase : Union[str, Any] = 0
UpperCAmelCase : Dict = 0
UpperCAmelCase : Optional[Any] = 0
UpperCAmelCase : List[Any] = 0
UpperCAmelCase : int = 0
UpperCAmelCase : Optional[Any] = 0
UpperCAmelCase : str = reader.get_doc_mentions(_snake_case , key_doc_lines[doc] , _snake_case )
key_singletons_num += singletons_num
if NP_only or min_span:
UpperCAmelCase : List[Any] = reader.set_annotated_parse_trees(_snake_case , key_doc_lines[doc] , _snake_case , _snake_case )
UpperCAmelCase : int = reader.get_doc_mentions(_snake_case , sys_doc_lines[doc] , _snake_case )
sys_singletons_num += singletons_num
if NP_only or min_span:
UpperCAmelCase : str = reader.set_annotated_parse_trees(_snake_case , key_doc_lines[doc] , _snake_case , _snake_case )
if remove_nested:
UpperCAmelCase : Optional[int] = reader.remove_nested_coref_mentions(_snake_case , _snake_case )
key_nested_coref_num += nested_mentions
key_removed_nested_clusters += removed_clusters
UpperCAmelCase : int = reader.remove_nested_coref_mentions(_snake_case , _snake_case )
sys_nested_coref_num += nested_mentions
sys_removed_nested_clusters += removed_clusters
UpperCAmelCase : List[Any] = reader.get_mention_assignments(_snake_case , _snake_case )
UpperCAmelCase : Any = reader.get_mention_assignments(_snake_case , _snake_case )
UpperCAmelCase : str = (key_clusters, sys_clusters, key_mention_sys_cluster, sys_mention_key_cluster)
if remove_nested:
logger.info(
'Number of removed nested coreferring mentions in the key '
F"""annotation: {key_nested_coref_num}; and system annotation: {sys_nested_coref_num}""" )
logger.info(
'Number of resulting singleton clusters in the key '
F"""annotation: {key_removed_nested_clusters}; and system annotation: {sys_removed_nested_clusters}""" )
if not keep_singletons:
logger.info(
F"""{key_singletons_num:d} and {sys_singletons_num:d} singletons are removed from the key and system """
'files, respectively' )
return doc_coref_infos
def evaluate( key_lines , sys_lines , metrics , NP_only , remove_nested , keep_singletons , min_span ):
UpperCAmelCase : str = get_coref_infos(_snake_case , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case )
UpperCAmelCase : Union[str, Any] = {}
UpperCAmelCase : Union[str, Any] = 0
UpperCAmelCase : Optional[int] = 0
for name, metric in metrics:
UpperCAmelCase : Tuple = evaluator.evaluate_documents(_snake_case , _snake_case , beta=1 )
if name in ["muc", "bcub", "ceafe"]:
conll += fa
conll_subparts_num += 1
output_scores.update({F"""{name}/recall""": recall, F"""{name}/precision""": precision, F"""{name}/f1""": fa} )
logger.info(
name.ljust(10 ) , F"""Recall: {recall * 1_00:.2f}""" , F""" Precision: {precision * 1_00:.2f}""" , F""" F1: {fa * 1_00:.2f}""" , )
if conll_subparts_num == 3:
UpperCAmelCase : Optional[Any] = (conll / 3) * 1_00
logger.info(F"""CoNLL score: {conll:.2f}""" )
output_scores.update({'conll_score': conll} )
return output_scores
def check_gold_parse_annotation( key_lines ):
UpperCAmelCase : Any = False
for line in key_lines:
if not line.startswith('#' ):
if len(line.split() ) > 6:
UpperCAmelCase : Dict = line.split()[5]
if not parse_col == "-":
UpperCAmelCase : Optional[int] = True
break
else:
break
return has_gold_parse
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class A_ ( datasets.Metric ):
'''simple docstring'''
    def _info( self ):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'predictions': datasets.Sequence(datasets.Value('string' ) ),
'references': datasets.Sequence(datasets.Value('string' ) ),
} ) , codebase_urls=['https://github.com/ns-moosavi/coval'] , reference_urls=[
'https://github.com/ns-moosavi/coval',
'https://www.aclweb.org/anthology/P16-1060',
'http://www.conll.cemantix.org/2012/data.html',
] , )
    def _compute( self , predictions , references , keep_singletons=True , NP_only=False , min_span=False , remove_nested=False ):
        metrics = [
            ('''mentions''', evaluator.mentions),
            ('''muc''', evaluator.muc),
            ('''bcub''', evaluator.b_cubed),
            ('''ceafe''', evaluator.ceafe),
            ('''lea''', evaluator.lea),
        ]
        if min_span:
            has_gold_parse = util.check_gold_parse_annotation(references )
            if not has_gold_parse:
                raise NotImplementedError('References should have gold parse annotation to use \'min_span\'.' )
            # util.parse_key_file(key_file)
            # key_file = key_file + ".parsed"
        score = evaluate(
            key_lines=references , sys_lines=predictions , metrics=metrics , NP_only=NP_only , remove_nested=remove_nested , keep_singletons=keep_singletons , min_span=min_span , )
        return score
| 151 |
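For intuition, the CoNLL score assembled in the snippet above is just the unweighted mean of the MUC, B³ and CEAF_e F1 values, reported on a 0–100 scale; a minimal sketch with made-up F1 numbers:

```python
# The F1 values below are invented placeholders, not real scores.
per_metric_f1 = {"muc": 0.71, "bcub": 0.63, "ceafe": 0.58}

conll_score = sum(per_metric_f1.values()) / 3 * 100  # average of the three F1s
print(f"CoNLL score: {conll_score:.2f}")  # CoNLL score: 64.00
```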
"""simple docstring"""
import numpy as np
def runge_kutta(f, ya, xa, x_end, h):
    """Classic fourth-order Runge-Kutta: integrate y' = f(x, y) from xa to x_end."""
    n = int(np.ceil((x_end - xa) / h))
    y = np.zeros((n + 1,))
    y[0] = ya
    x = xa
    for k in range(n):
        # Four slope estimates per step, combined with the standard RK4 weights.
        ka = f(x, y[k])
        kb = f(x + 0.5 * h, y[k] + 0.5 * h * ka)
        kc = f(x + 0.5 * h, y[k] + 0.5 * h * kb)
        kd = f(x + h, y[k] + h * kc)
        y[k + 1] = y[k] + (1 / 6) * h * (ka + 2 * kb + 2 * kc + kd)
        x += h
    return y
if __name__ == "__main__":
import doctest
doctest.testmod()
| 102 | 0 |
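A short usage sketch of the repaired RK4 routine above, integrating dy/dx = y (chosen here purely for illustration, since the exact solution e^x is known):

```python
import numpy as np

def dy_dx(x, y):
    return y  # dy/dx = y, so y(x) = e**x with y(0) = 1

approx = runge_kutta(dy_dx, ya=1.0, xa=0.0, x_end=1.0, h=0.01)
print(abs(approx[-1] - np.e))  # roughly 1e-10: RK4's global error is O(h**4)
```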
import tempfile
import torch
from diffusers import (
DEISMultistepScheduler,
DPMSolverMultistepScheduler,
DPMSolverSinglestepScheduler,
UniPCMultistepScheduler,
)
from .test_schedulers import SchedulerCommonTest
class __lowerCAmelCase ( lowerCAmelCase):
_a = (DPMSolverSinglestepScheduler,)
_a = (('''num_inference_steps''', 25),)
def SCREAMING_SNAKE_CASE ( self: int , **_lowerCAmelCase: int ):
lowercase :Optional[int] = {
"num_train_timesteps": 10_00,
"beta_start": 0.00_01,
"beta_end": 0.02,
"beta_schedule": "linear",
"solver_order": 2,
"prediction_type": "epsilon",
"thresholding": False,
"sample_max_value": 1.0,
"algorithm_type": "dpmsolver++",
"solver_type": "midpoint",
"lambda_min_clipped": -float("inf" ),
"variance_type": None,
}
config.update(**_lowerCAmelCase )
return config
def SCREAMING_SNAKE_CASE ( self: str , _lowerCAmelCase: Tuple=0 , **_lowerCAmelCase: Optional[int] ):
lowercase :List[str] = dict(self.forward_default_kwargs )
lowercase :Dict = kwargs.pop("num_inference_steps" , _lowerCAmelCase )
lowercase :int = self.dummy_sample
lowercase :List[Any] = 0.1 * sample
lowercase :Any = [residual + 0.2, residual + 0.15, residual + 0.10]
for scheduler_class in self.scheduler_classes:
lowercase :Tuple = self.get_scheduler_config(**_lowerCAmelCase )
lowercase :Tuple = scheduler_class(**_lowerCAmelCase )
scheduler.set_timesteps(_lowerCAmelCase )
# copy over dummy past residuals
lowercase :Tuple = dummy_past_residuals[: scheduler.config.solver_order]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(_lowerCAmelCase )
lowercase :Optional[int] = scheduler_class.from_pretrained(_lowerCAmelCase )
new_scheduler.set_timesteps(_lowerCAmelCase )
# copy over dummy past residuals
lowercase :Optional[int] = dummy_past_residuals[: new_scheduler.config.solver_order]
lowercase , lowercase :Any = sample, sample
for t in range(_lowerCAmelCase , time_step + scheduler.config.solver_order + 1 ):
lowercase :Optional[int] = scheduler.step(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , **_lowerCAmelCase ).prev_sample
lowercase :Dict = new_scheduler.step(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , **_lowerCAmelCase ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical"
def SCREAMING_SNAKE_CASE ( self: Optional[Any] ):
pass
def SCREAMING_SNAKE_CASE ( self: Dict , _lowerCAmelCase: str=0 , **_lowerCAmelCase: int ):
lowercase :List[Any] = dict(self.forward_default_kwargs )
lowercase :Optional[Any] = kwargs.pop("num_inference_steps" , _lowerCAmelCase )
lowercase :int = self.dummy_sample
lowercase :str = 0.1 * sample
lowercase :Optional[Any] = [residual + 0.2, residual + 0.15, residual + 0.10]
for scheduler_class in self.scheduler_classes:
lowercase :Any = self.get_scheduler_config()
lowercase :int = scheduler_class(**_lowerCAmelCase )
scheduler.set_timesteps(_lowerCAmelCase )
# copy over dummy past residuals (must be after setting timesteps)
lowercase :Optional[int] = dummy_past_residuals[: scheduler.config.solver_order]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(_lowerCAmelCase )
lowercase :List[Any] = scheduler_class.from_pretrained(_lowerCAmelCase )
# copy over dummy past residuals
new_scheduler.set_timesteps(_lowerCAmelCase )
# copy over dummy past residual (must be after setting timesteps)
lowercase :Dict = dummy_past_residuals[: new_scheduler.config.solver_order]
lowercase :Dict = scheduler.step(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , **_lowerCAmelCase ).prev_sample
lowercase :Optional[Any] = new_scheduler.step(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , **_lowerCAmelCase ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical"
def SCREAMING_SNAKE_CASE ( self: Optional[Any] , _lowerCAmelCase: Optional[int]=None , **_lowerCAmelCase: List[Any] ):
if scheduler is None:
lowercase :str = self.scheduler_classes[0]
lowercase :int = self.get_scheduler_config(**_lowerCAmelCase )
lowercase :Optional[Any] = scheduler_class(**_lowerCAmelCase )
lowercase :Tuple = self.scheduler_classes[0]
lowercase :Tuple = self.get_scheduler_config(**_lowerCAmelCase )
lowercase :List[Any] = scheduler_class(**_lowerCAmelCase )
lowercase :int = 10
lowercase :Any = self.dummy_model()
lowercase :Dict = self.dummy_sample_deter
scheduler.set_timesteps(_lowerCAmelCase )
for i, t in enumerate(scheduler.timesteps ):
lowercase :str = model(_lowerCAmelCase , _lowerCAmelCase )
lowercase :Optional[int] = scheduler.step(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ).prev_sample
return sample
def SCREAMING_SNAKE_CASE ( self: Tuple ):
lowercase :Dict = DPMSolverSinglestepScheduler(**self.get_scheduler_config() )
lowercase :Optional[int] = 50
lowercase :Tuple = self.dummy_model()
lowercase :Any = self.dummy_sample_deter
scheduler.set_timesteps(_lowerCAmelCase )
# make sure that the first t is uneven
for i, t in enumerate(scheduler.timesteps[3:] ):
lowercase :Dict = model(_lowerCAmelCase , _lowerCAmelCase )
lowercase :int = scheduler.step(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ).prev_sample
lowercase :List[str] = torch.mean(torch.abs(_lowerCAmelCase ) )
assert abs(result_mean.item() - 0.25_74 ) < 1e-3
def SCREAMING_SNAKE_CASE ( self: Union[str, Any] ):
for timesteps in [25, 50, 1_00, 9_99, 10_00]:
self.check_over_configs(num_train_timesteps=_lowerCAmelCase )
def SCREAMING_SNAKE_CASE ( self: Any ):
# make sure that iterating over schedulers with same config names gives same results
# for defaults
lowercase :str = DPMSolverSinglestepScheduler(**self.get_scheduler_config() )
lowercase :Dict = self.full_loop(scheduler=_lowerCAmelCase )
lowercase :Any = torch.mean(torch.abs(_lowerCAmelCase ) )
assert abs(result_mean.item() - 0.27_91 ) < 1e-3
lowercase :List[str] = DEISMultistepScheduler.from_config(scheduler.config )
lowercase :Any = DPMSolverMultistepScheduler.from_config(scheduler.config )
lowercase :Any = UniPCMultistepScheduler.from_config(scheduler.config )
lowercase :List[str] = DPMSolverSinglestepScheduler.from_config(scheduler.config )
lowercase :int = self.full_loop(scheduler=_lowerCAmelCase )
lowercase :Union[str, Any] = torch.mean(torch.abs(_lowerCAmelCase ) )
assert abs(result_mean.item() - 0.27_91 ) < 1e-3
def SCREAMING_SNAKE_CASE ( self: int ):
self.check_over_configs(thresholding=_lowerCAmelCase )
for order in [1, 2, 3]:
for solver_type in ["midpoint", "heun"]:
for threshold in [0.5, 1.0, 2.0]:
for prediction_type in ["epsilon", "sample"]:
self.check_over_configs(
thresholding=_lowerCAmelCase , prediction_type=_lowerCAmelCase , sample_max_value=_lowerCAmelCase , algorithm_type="dpmsolver++" , solver_order=_lowerCAmelCase , solver_type=_lowerCAmelCase , )
def SCREAMING_SNAKE_CASE ( self: Dict ):
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(prediction_type=_lowerCAmelCase )
def SCREAMING_SNAKE_CASE ( self: List[str] ):
for algorithm_type in ["dpmsolver", "dpmsolver++"]:
for solver_type in ["midpoint", "heun"]:
for order in [1, 2, 3]:
for prediction_type in ["epsilon", "sample"]:
self.check_over_configs(
solver_order=_lowerCAmelCase , solver_type=_lowerCAmelCase , prediction_type=_lowerCAmelCase , algorithm_type=_lowerCAmelCase , )
lowercase :List[Any] = self.full_loop(
solver_order=_lowerCAmelCase , solver_type=_lowerCAmelCase , prediction_type=_lowerCAmelCase , algorithm_type=_lowerCAmelCase , )
assert not torch.isnan(_lowerCAmelCase ).any(), "Samples have nan numbers"
def SCREAMING_SNAKE_CASE ( self: Any ):
self.check_over_configs(lower_order_final=_lowerCAmelCase )
self.check_over_configs(lower_order_final=_lowerCAmelCase )
def SCREAMING_SNAKE_CASE ( self: Optional[Any] ):
self.check_over_configs(lambda_min_clipped=-float("inf" ) )
self.check_over_configs(lambda_min_clipped=-5.1 )
def SCREAMING_SNAKE_CASE ( self: Any ):
self.check_over_configs(variance_type=_lowerCAmelCase )
self.check_over_configs(variance_type="learned_range" )
def SCREAMING_SNAKE_CASE ( self: Dict ):
for num_inference_steps in [1, 2, 3, 5, 10, 50, 1_00, 9_99, 10_00]:
self.check_over_forward(num_inference_steps=_lowerCAmelCase , time_step=0 )
def SCREAMING_SNAKE_CASE ( self: Dict ):
lowercase :Optional[int] = self.full_loop()
lowercase :Any = torch.mean(torch.abs(_lowerCAmelCase ) )
assert abs(result_mean.item() - 0.27_91 ) < 1e-3
def SCREAMING_SNAKE_CASE ( self: Dict ):
lowercase :Union[str, Any] = self.full_loop(use_karras_sigmas=_lowerCAmelCase )
lowercase :str = torch.mean(torch.abs(_lowerCAmelCase ) )
assert abs(result_mean.item() - 0.22_48 ) < 1e-3
def SCREAMING_SNAKE_CASE ( self: List[str] ):
lowercase :Union[str, Any] = self.full_loop(prediction_type="v_prediction" )
lowercase :Any = torch.mean(torch.abs(_lowerCAmelCase ) )
assert abs(result_mean.item() - 0.14_53 ) < 1e-3
def SCREAMING_SNAKE_CASE ( self: List[str] ):
lowercase :List[Any] = self.full_loop(prediction_type="v_prediction" , use_karras_sigmas=_lowerCAmelCase )
lowercase :int = torch.mean(torch.abs(_lowerCAmelCase ) )
assert abs(result_mean.item() - 0.06_49 ) < 1e-3
def SCREAMING_SNAKE_CASE ( self: str ):
lowercase :Dict = self.scheduler_classes[0]
lowercase :Optional[Any] = self.get_scheduler_config(thresholding=_lowerCAmelCase , dynamic_thresholding_ratio=0 )
lowercase :str = scheduler_class(**_lowerCAmelCase )
lowercase :List[Any] = 10
lowercase :Optional[Any] = self.dummy_model()
lowercase :Any = self.dummy_sample_deter.half()
scheduler.set_timesteps(_lowerCAmelCase )
for i, t in enumerate(scheduler.timesteps ):
lowercase :Optional[Any] = model(_lowerCAmelCase , _lowerCAmelCase )
lowercase :Any = scheduler.step(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ).prev_sample
assert sample.dtype == torch.floataa
| 158 |
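The tests above all drive the same diffusers scheduler loop; a stripped-down sketch of that loop with random tensors standing in for a real UNet (the tensor shape is an arbitrary placeholder):

```python
import torch
from diffusers import DPMSolverSinglestepScheduler

scheduler = DPMSolverSinglestepScheduler(num_train_timesteps=1000)
scheduler.set_timesteps(10)  # 10 inference steps

sample = torch.randn(1, 3, 8, 8)  # stand-in for a noisy latent
for t in scheduler.timesteps:
    model_output = torch.randn_like(sample)  # a real pipeline calls a UNet here
    sample = scheduler.step(model_output, t, sample).prev_sample
```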
import fire
from transformers import AutoConfig, AutoModelForSeq2SeqLM, AutoTokenizer


def save_randomly_initialized_version(config_name, save_dir, **config_kwargs):
    # Build a model with the given config but freshly initialized weights.
    config = AutoConfig.from_pretrained(config_name, **config_kwargs)
    model = AutoModelForSeq2SeqLM.from_config(config)
    model.save_pretrained(save_dir)
    AutoTokenizer.from_pretrained(config_name).save_pretrained(save_dir)
    return model


if __name__ == "__main__":
    fire.Fire(save_randomly_initialized_version)
| 158 | 1 |
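Assuming the script above is saved as save_random.py, python-fire exposes the function as a CLI; the checkpoint name and output directory below are placeholders:

```python
# From the shell:
#   python save_random.py sshleifer/bart-tiny-random ./bart-tiny-fresh
# Or programmatically:
model = save_randomly_initialized_version("sshleifer/bart-tiny-random", "./bart-tiny-fresh")
```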
import os
from pathlib import Path
import numpy as np
import pytest
from pack_dataset import pack_data_dir
from parameterized import parameterized
from save_len_file import save_len_file
from torch.utils.data import DataLoader
from transformers import AutoTokenizer
from transformers.models.mbart.modeling_mbart import shift_tokens_right
from transformers.testing_utils import TestCasePlus, slow
from utils import FAIRSEQ_AVAILABLE, DistributedSortishSampler, LegacySeqaSeqDataset, SeqaSeqDataset
lowerCAmelCase_ = '''bert-base-cased'''
lowerCAmelCase_ = '''google/pegasus-xsum'''
lowerCAmelCase_ = [''' Sam ate lunch today.''', '''Sams lunch ingredients.''']
lowerCAmelCase_ = ['''A very interesting story about what I ate for lunch.''', '''Avocado, celery, turkey, coffee''']
lowerCAmelCase_ = '''patrickvonplaten/t5-tiny-random'''
lowerCAmelCase_ = '''sshleifer/bart-tiny-random'''
lowerCAmelCase_ = '''sshleifer/tiny-mbart'''
lowerCAmelCase_ = '''sshleifer/tiny-marian-en-de'''
def __SCREAMING_SNAKE_CASE (SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
snake_case_ = '''\n'''.join(SCREAMING_SNAKE_CASE__ )
Path(SCREAMING_SNAKE_CASE__ ).open('''w''' ).writelines(SCREAMING_SNAKE_CASE__ )
def __SCREAMING_SNAKE_CASE (SCREAMING_SNAKE_CASE__ ):
for split in ["train", "val", "test"]:
_dump_articles(os.path.join(SCREAMING_SNAKE_CASE__ , F'''{split}.source''' ) , SCREAMING_SNAKE_CASE__ )
_dump_articles(os.path.join(SCREAMING_SNAKE_CASE__ , F'''{split}.target''' ) , SCREAMING_SNAKE_CASE__ )
return tmp_dir
class snake_case_ ( __A ):
'''simple docstring'''
@parameterized.expand(
[
MBART_TINY,
MARIAN_TINY,
T5_TINY,
BART_TINY,
PEGASUS_XSUM,
] , )
@slow
def snake_case__( self : List[str] , _UpperCamelCase : Optional[Any] ) ->int:
snake_case_ = AutoTokenizer.from_pretrained(_UpperCamelCase )
snake_case_ = make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir() )
snake_case_ = max(len(tokenizer.encode(_UpperCamelCase ) ) for a in ARTICLES )
snake_case_ = max(len(tokenizer.encode(_UpperCamelCase ) ) for a in SUMMARIES )
snake_case_ = 4
snake_case_ = 8
assert max_len_target > max_src_len # Will be truncated
assert max_len_source > max_src_len # Will be truncated
snake_case_, snake_case_ = '''ro_RO''', '''de_DE''' # ignored for all but mbart, but never causes error.
snake_case_ = SeqaSeqDataset(
_UpperCamelCase , data_dir=_UpperCamelCase , type_path='''train''' , max_source_length=_UpperCamelCase , max_target_length=_UpperCamelCase , src_lang=_UpperCamelCase , tgt_lang=_UpperCamelCase , )
snake_case_ = DataLoader(_UpperCamelCase , batch_size=2 , collate_fn=train_dataset.collate_fn )
for batch in dataloader:
assert isinstance(_UpperCamelCase , _UpperCamelCase )
assert batch["attention_mask"].shape == batch["input_ids"].shape
# show that articles were trimmed.
assert batch["input_ids"].shape[1] == max_src_len
# show that targets are the same len
assert batch["labels"].shape[1] == max_tgt_len
if tok_name != MBART_TINY:
continue
# check language codes in correct place
snake_case_ = shift_tokens_right(batch['''labels'''] , tokenizer.pad_token_id )
assert batch["decoder_input_ids"][0, 0].item() == tokenizer.lang_code_to_id[tgt_lang]
assert batch["decoder_input_ids"][0, -1].item() == tokenizer.eos_token_id
assert batch["input_ids"][0, -2].item() == tokenizer.eos_token_id
assert batch["input_ids"][0, -1].item() == tokenizer.lang_code_to_id[src_lang]
break # No need to test every batch
@parameterized.expand([BART_TINY, BERT_BASE_CASED] )
def snake_case__( self : int , _UpperCamelCase : Union[str, Any] ) ->Any:
snake_case_ = AutoTokenizer.from_pretrained(_UpperCamelCase )
snake_case_ = make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir() )
snake_case_ = max(len(tokenizer.encode(_UpperCamelCase ) ) for a in ARTICLES )
snake_case_ = max(len(tokenizer.encode(_UpperCamelCase ) ) for a in SUMMARIES )
snake_case_ = 4
snake_case_ = LegacySeqaSeqDataset(
_UpperCamelCase , data_dir=_UpperCamelCase , type_path='''train''' , max_source_length=2_0 , max_target_length=_UpperCamelCase , )
snake_case_ = DataLoader(_UpperCamelCase , batch_size=2 , collate_fn=train_dataset.collate_fn )
for batch in dataloader:
assert batch["attention_mask"].shape == batch["input_ids"].shape
# show that articles were trimmed.
assert batch["input_ids"].shape[1] == max_len_source
assert 2_0 >= batch["input_ids"].shape[1] # trimmed significantly
# show that targets were truncated
assert batch["labels"].shape[1] == trunc_target # Truncated
assert max_len_target > trunc_target # Truncated
break # No need to test every batch
def snake_case__( self : List[Any] ) ->List[Any]:
snake_case_ = AutoTokenizer.from_pretrained('''facebook/mbart-large-cc25''' )
snake_case_ = Path(make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir() ) )
snake_case_ = tmp_dir.joinpath('''train.source''' ).open().readlines()
snake_case_ = Path(make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir() ) )
pack_data_dir(_UpperCamelCase , _UpperCamelCase , 1_2_8 , _UpperCamelCase )
snake_case_ = {x.name for x in tmp_dir.iterdir()}
snake_case_ = {x.name for x in save_dir.iterdir()}
snake_case_ = save_dir.joinpath('''train.source''' ).open().readlines()
# orig: [' Sam ate lunch today.\n', 'Sams lunch ingredients.']
# desired_packed: [' Sam ate lunch today.\n Sams lunch ingredients.']
assert len(_UpperCamelCase ) < len(_UpperCamelCase )
assert len(_UpperCamelCase ) == 1
assert len(packed_examples[0] ) == sum(len(_UpperCamelCase ) for x in orig_examples )
assert orig_paths == new_paths
@pytest.mark.skipif(not FAIRSEQ_AVAILABLE , reason='''This test requires fairseq''' )
def snake_case__( self : Union[str, Any] ) ->Optional[Any]:
if not FAIRSEQ_AVAILABLE:
return
snake_case_, snake_case_, snake_case_ = self._get_dataset(max_len=6_4 )
snake_case_ = 6_4
snake_case_ = ds.make_dynamic_sampler(_UpperCamelCase , required_batch_size_multiple=_UpperCamelCase )
snake_case_ = [len(_UpperCamelCase ) for x in batch_sampler]
assert len(set(_UpperCamelCase ) ) > 1 # it's not dynamic batch size if every batch is the same length
assert sum(_UpperCamelCase ) == len(_UpperCamelCase ) # no dropped or added examples
snake_case_ = DataLoader(_UpperCamelCase , batch_sampler=_UpperCamelCase , collate_fn=ds.collate_fn , num_workers=2 )
snake_case_ = []
snake_case_ = []
for batch in data_loader:
snake_case_ = batch['''input_ids'''].shape
snake_case_ = src_shape[0]
assert bs % required_batch_size_multiple == 0 or bs < required_batch_size_multiple
snake_case_ = np.product(batch['''input_ids'''].shape )
num_src_per_batch.append(_UpperCamelCase )
if num_src_tokens > (max_tokens * 1.1):
failures.append(_UpperCamelCase )
assert num_src_per_batch[0] == max(_UpperCamelCase )
if failures:
raise AssertionError(f'''too many tokens in {len(_UpperCamelCase )} batches''' )
def snake_case__( self : List[str] ) ->int:
snake_case_, snake_case_, snake_case_ = self._get_dataset(max_len=5_1_2 )
snake_case_ = 2
snake_case_ = ds.make_sortish_sampler(_UpperCamelCase , shuffle=_UpperCamelCase )
snake_case_ = DataLoader(_UpperCamelCase , batch_size=_UpperCamelCase , collate_fn=ds.collate_fn , num_workers=2 )
snake_case_ = DataLoader(_UpperCamelCase , batch_size=_UpperCamelCase , collate_fn=ds.collate_fn , num_workers=2 , sampler=_UpperCamelCase )
snake_case_ = tokenizer.pad_token_id
def count_pad_tokens(_UpperCamelCase : str , _UpperCamelCase : List[str]="input_ids" ):
return [batch[k].eq(_UpperCamelCase ).sum().item() for batch in data_loader]
assert sum(count_pad_tokens(_UpperCamelCase , k='''labels''' ) ) < sum(count_pad_tokens(_UpperCamelCase , k='''labels''' ) )
assert sum(count_pad_tokens(_UpperCamelCase ) ) < sum(count_pad_tokens(_UpperCamelCase ) )
assert len(_UpperCamelCase ) == len(_UpperCamelCase )
def snake_case__( self : Optional[Any] , _UpperCamelCase : Optional[Any]=1_0_0_0 , _UpperCamelCase : List[Any]=1_2_8 ) ->List[Any]:
if os.getenv('''USE_REAL_DATA''' , _UpperCamelCase ):
snake_case_ = '''examples/seq2seq/wmt_en_ro'''
snake_case_ = max_len * 2 * 6_4
if not Path(_UpperCamelCase ).joinpath('''train.len''' ).exists():
save_len_file(_UpperCamelCase , _UpperCamelCase )
else:
snake_case_ = '''examples/seq2seq/test_data/wmt_en_ro'''
snake_case_ = max_len * 4
save_len_file(_UpperCamelCase , _UpperCamelCase )
snake_case_ = AutoTokenizer.from_pretrained(_UpperCamelCase )
snake_case_ = SeqaSeqDataset(
_UpperCamelCase , data_dir=_UpperCamelCase , type_path='''train''' , max_source_length=_UpperCamelCase , max_target_length=_UpperCamelCase , n_obs=_UpperCamelCase , )
return ds, max_tokens, tokenizer
def snake_case__( self : Union[str, Any] ) ->Optional[int]:
snake_case_, snake_case_, snake_case_ = self._get_dataset()
snake_case_ = set(DistributedSortishSampler(_UpperCamelCase , 2_5_6 , num_replicas=2 , rank=0 , add_extra_examples=_UpperCamelCase ) )
snake_case_ = set(DistributedSortishSampler(_UpperCamelCase , 2_5_6 , num_replicas=2 , rank=1 , add_extra_examples=_UpperCamelCase ) )
assert idsa.intersection(_UpperCamelCase ) == set()
@parameterized.expand(
[
MBART_TINY,
MARIAN_TINY,
T5_TINY,
BART_TINY,
PEGASUS_XSUM,
] , )
def snake_case__( self : List[str] , _UpperCamelCase : Tuple ) ->Optional[Any]:
snake_case_ = AutoTokenizer.from_pretrained(_UpperCamelCase , use_fast=_UpperCamelCase )
if tok_name == MBART_TINY:
snake_case_ = SeqaSeqDataset(
_UpperCamelCase , data_dir=make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir() ) , type_path='''train''' , max_source_length=4 , max_target_length=8 , src_lang='''EN''' , tgt_lang='''FR''' , )
snake_case_ = train_dataset.dataset_kwargs
assert "src_lang" in kwargs and "tgt_lang" in kwargs
else:
snake_case_ = SeqaSeqDataset(
_UpperCamelCase , data_dir=make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir() ) , type_path='''train''' , max_source_length=4 , max_target_length=8 , )
snake_case_ = train_dataset.dataset_kwargs
assert "add_prefix_space" not in kwargs if tok_name != BART_TINY else "add_prefix_space" in kwargs
assert len(_UpperCamelCase ) == 1 if tok_name == BART_TINY else len(_UpperCamelCase ) == 0 | 8 |
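The sortish-sampler assertions above rest on a simple idea: batching length-sorted examples needs far less padding than random batching. The idea in isolation, with invented token lengths:

```python
lengths = [12, 97, 15, 88, 14, 90, 11, 93]  # made-up example lengths

order = sorted(range(len(lengths)), key=lambda i: lengths[i])
batches = [order[i : i + 2] for i in range(0, len(order), 2)]

for batch in batches:
    longest = max(lengths[i] for i in batch)
    pad = sum(longest - lengths[i] for i in batch)
    print(batch, "pad tokens:", pad)  # far fewer than with a random order
```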
from pathlib import PurePosixPath
from typing import Optional
import fsspec
from fsspec import AbstractFileSystem
from huggingface_hub.hf_api import DatasetInfo
from ..utils.file_utils import get_authentication_headers_for_url
from ..utils.hub import hf_hub_url
class _UpperCAmelCase ( A__ ):
"""simple docstring"""
lowercase__ = """"""
lowercase__ = """hf-legacy""" # "hf://"" is reserved for hffs
def __init__( self : List[str], lowerCamelCase : Optional[DatasetInfo] = None, lowerCamelCase : Optional[str] = None, **lowerCamelCase : str, ):
'''simple docstring'''
super().__init__(self, **lowerCamelCase )
lowercase__ = repo_info
lowercase__ = token
lowercase__ = None
def lowercase__ ( self : Optional[Any] ):
'''simple docstring'''
if self.dir_cache is None:
lowercase__ = {}
for hf_file in self.repo_info.siblings:
# TODO(QL): add sizes
lowercase__ = {
'''name''': hf_file.rfilename,
'''size''': None,
'''type''': '''file''',
}
self.dir_cache.update(
{
str(lowerCamelCase ): {'''name''': str(lowerCamelCase ), '''size''': None, '''type''': '''directory'''}
for d in list(PurePosixPath(hf_file.rfilename ).parents )[:-1]
} )
def lowercase__ ( self : Any, lowerCamelCase : str, lowerCamelCase : str = "rb", **lowerCamelCase : Any, ):
'''simple docstring'''
if not isinstance(self.repo_info, lowerCamelCase ):
raise NotImplementedError(F"""Open is only implemented for dataset repositories, but got {self.repo_info}""" )
lowercase__ = hf_hub_url(self.repo_info.id, lowerCamelCase, revision=self.repo_info.sha )
return fsspec.open(
lowerCamelCase, mode=lowerCamelCase, headers=get_authentication_headers_for_url(lowerCamelCase, use_auth_token=self.token ), client_kwargs={'''trust_env''': True}, ).open()
def lowercase__ ( self : Dict, lowerCamelCase : Any, **lowerCamelCase : int ):
'''simple docstring'''
self._get_dirs()
lowercase__ = self._strip_protocol(lowerCamelCase )
if path in self.dir_cache:
return self.dir_cache[path]
else:
raise FileNotFoundError(lowerCamelCase )
def lowercase__ ( self : Optional[Any], lowerCamelCase : List[str], lowerCamelCase : Optional[int]=False, **lowerCamelCase : str ):
'''simple docstring'''
self._get_dirs()
lowercase__ = PurePosixPath(path.strip('''/''' ) )
lowercase__ = {}
for p, f in self.dir_cache.items():
lowercase__ = PurePosixPath(p.strip('''/''' ) )
lowercase__ = p.parent
if root == path:
lowercase__ = f
lowercase__ = list(paths.values() )
if detail:
return out
else:
return sorted(f['''name'''] for f in out )
| 207 | 0 |
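The _get_dirs method above derives directory entries from a flat list of file paths; the same trick in isolation (file names are placeholders):

```python
from pathlib import PurePosixPath

filenames = ["data/train.csv", "data/test.csv", "README.md"]  # placeholders

dir_cache = {}
for name in filenames:
    dir_cache[name] = {"name": name, "size": None, "type": "file"}
    for parent in list(PurePosixPath(name).parents)[:-1]:  # drop the '.' root
        dir_cache[str(parent)] = {"name": str(parent), "size": None, "type": "directory"}

print(sorted(dir_cache))  # ['README.md', 'data', 'data/test.csv', 'data/train.csv']
```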
"""simple docstring"""
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import YolosConfig, YolosForObjectDetection, YolosImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
lowerCamelCase__ = logging.get_logger(__name__)
def lowercase__ ( lowercase_ ) -> YolosConfig:
"""simple docstring"""
_UpperCamelCase : int = YolosConfig()
# size of the architecture
if "yolos_ti" in yolos_name:
_UpperCamelCase : List[Any] = 192
_UpperCamelCase : Union[str, Any] = 768
_UpperCamelCase : int = 12
_UpperCamelCase : Any = 3
_UpperCamelCase : Optional[int] = [800, 1_333]
_UpperCamelCase : Any = False
elif yolos_name == "yolos_s_dWr":
_UpperCamelCase : Optional[Any] = 330
_UpperCamelCase : Optional[int] = 14
_UpperCamelCase : Union[str, Any] = 6
_UpperCamelCase : Tuple = 1_320
elif "yolos_s" in yolos_name:
_UpperCamelCase : str = 384
_UpperCamelCase : List[str] = 1_536
_UpperCamelCase : Any = 12
_UpperCamelCase : int = 6
elif "yolos_b" in yolos_name:
_UpperCamelCase : Union[str, Any] = [800, 1_344]
_UpperCamelCase : Union[str, Any] = 91
_UpperCamelCase : Any = "huggingface/label-files"
_UpperCamelCase : Union[str, Any] = "coco-detection-id2label.json"
_UpperCamelCase : List[Any] = json.load(open(hf_hub_download(lowercase_ ,lowercase_ ,repo_type="dataset" ) ,"r" ) )
_UpperCamelCase : Tuple = {int(lowercase_ ): v for k, v in idalabel.items()}
_UpperCamelCase : List[str] = idalabel
_UpperCamelCase : int = {v: k for k, v in idalabel.items()}
return config
def lowercase__ ( lowercase_ ,lowercase_ ,lowercase_ = False ) -> Optional[Any]:
"""simple docstring"""
for i in range(config.num_hidden_layers ):
# read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
_UpperCamelCase : List[str] = state_dict.pop(F'''blocks.{i}.attn.qkv.weight''' )
_UpperCamelCase : Any = state_dict.pop(F'''blocks.{i}.attn.qkv.bias''' )
# next, add query, keys and values (in that order) to the state dict
_UpperCamelCase : List[str] = in_proj_weight[: config.hidden_size, :]
_UpperCamelCase : int = in_proj_bias[: config.hidden_size]
_UpperCamelCase : Optional[Any] = in_proj_weight[
config.hidden_size : config.hidden_size * 2, :
]
_UpperCamelCase : Tuple = in_proj_bias[
config.hidden_size : config.hidden_size * 2
]
_UpperCamelCase : Union[str, Any] = in_proj_weight[-config.hidden_size :, :]
_UpperCamelCase : Optional[Any] = in_proj_bias[-config.hidden_size :]
def lowercase__ ( lowercase_ ) -> str:
"""simple docstring"""
if "backbone" in name:
_UpperCamelCase : Tuple = name.replace("backbone" ,"vit" )
if "cls_token" in name:
_UpperCamelCase : Optional[int] = name.replace("cls_token" ,"embeddings.cls_token" )
if "det_token" in name:
_UpperCamelCase : Union[str, Any] = name.replace("det_token" ,"embeddings.detection_tokens" )
if "mid_pos_embed" in name:
_UpperCamelCase : Optional[int] = name.replace("mid_pos_embed" ,"encoder.mid_position_embeddings" )
if "pos_embed" in name:
_UpperCamelCase : List[Any] = name.replace("pos_embed" ,"embeddings.position_embeddings" )
if "patch_embed.proj" in name:
_UpperCamelCase : Tuple = name.replace("patch_embed.proj" ,"embeddings.patch_embeddings.projection" )
if "blocks" in name:
_UpperCamelCase : List[str] = name.replace("blocks" ,"encoder.layer" )
if "attn.proj" in name:
_UpperCamelCase : Optional[Any] = name.replace("attn.proj" ,"attention.output.dense" )
if "attn" in name:
_UpperCamelCase : Any = name.replace("attn" ,"attention.self" )
if "norm1" in name:
_UpperCamelCase : Optional[int] = name.replace("norm1" ,"layernorm_before" )
if "norm2" in name:
_UpperCamelCase : List[str] = name.replace("norm2" ,"layernorm_after" )
if "mlp.fc1" in name:
_UpperCamelCase : Tuple = name.replace("mlp.fc1" ,"intermediate.dense" )
if "mlp.fc2" in name:
_UpperCamelCase : Dict = name.replace("mlp.fc2" ,"output.dense" )
if "class_embed" in name:
_UpperCamelCase : Any = name.replace("class_embed" ,"class_labels_classifier" )
if "bbox_embed" in name:
_UpperCamelCase : Any = name.replace("bbox_embed" ,"bbox_predictor" )
if "vit.norm" in name:
_UpperCamelCase : Any = name.replace("vit.norm" ,"vit.layernorm" )
return name
def lowercase__ ( lowercase_ ,lowercase_ ) -> dict:
"""simple docstring"""
for key in orig_state_dict.copy().keys():
_UpperCamelCase : Tuple = orig_state_dict.pop(lowercase_ )
if "qkv" in key:
_UpperCamelCase : Any = key.split("." )
_UpperCamelCase : int = int(key_split[2] )
_UpperCamelCase : Optional[int] = model.vit.encoder.layer[layer_num].attention.attention.all_head_size
if "weight" in key:
_UpperCamelCase : Tuple = val[:dim, :]
_UpperCamelCase : Optional[Any] = val[
dim : dim * 2, :
]
_UpperCamelCase : Union[str, Any] = val[-dim:, :]
else:
_UpperCamelCase : Optional[int] = val[:dim]
_UpperCamelCase : List[Any] = val[dim : dim * 2]
_UpperCamelCase : Any = val[-dim:]
else:
_UpperCamelCase : Dict = val
return orig_state_dict
def lowercase__ ( ) -> torch.Tensor:
"""simple docstring"""
_UpperCamelCase : Optional[int] = "http://images.cocodataset.org/val2017/000000039769.jpg"
_UpperCamelCase : Union[str, Any] = Image.open(requests.get(lowercase_ ,stream=lowercase_ ).raw )
return im
@torch.no_grad()
def lowercase__ ( lowercase_ ,lowercase_ ,lowercase_ ,lowercase_ = False ) -> Optional[Any]:
"""simple docstring"""
_UpperCamelCase : Union[str, Any] = get_yolos_config(lowercase_ )
# load original state_dict
_UpperCamelCase : Dict = torch.load(lowercase_ ,map_location="cpu" )["model"]
# load 🤗 model
_UpperCamelCase : int = YolosForObjectDetection(lowercase_ )
model.eval()
_UpperCamelCase : Optional[Any] = convert_state_dict(lowercase_ ,lowercase_ )
model.load_state_dict(lowercase_ )
# Check outputs on an image, prepared by YolosImageProcessor
_UpperCamelCase : Tuple = 800 if yolos_name != "yolos_ti" else 512
_UpperCamelCase : Tuple = YolosImageProcessor(format="coco_detection" ,size=lowercase_ )
_UpperCamelCase : Union[str, Any] = image_processor(images=prepare_img() ,return_tensors="pt" )
_UpperCamelCase : Dict = model(**lowercase_ )
_UpperCamelCase : Union[str, Any] = outputs.logits, outputs.pred_boxes
_UpperCamelCase : Optional[int] = None, None
if yolos_name == "yolos_ti":
_UpperCamelCase : Optional[int] = torch.tensor(
[[-39.5022, -11.9820, -17.6888], [-29.9574, -9.9769, -17.7691], [-42.3281, -20.7200, -30.6294]] )
_UpperCamelCase : int = torch.tensor(
[[0.4021, 0.0836, 0.7979], [0.0184, 0.2609, 0.0364], [0.1781, 0.2004, 0.2095]] )
elif yolos_name == "yolos_s_200_pre":
_UpperCamelCase : Any = torch.tensor(
[[-24.0248, -10.3024, -14.8290], [-42.0392, -16.8200, -27.4334], [-27.2743, -11.8154, -18.7148]] )
_UpperCamelCase : int = torch.tensor(
[[0.2559, 0.5455, 0.4706], [0.2989, 0.7279, 0.1875], [0.7732, 0.4017, 0.4462]] )
elif yolos_name == "yolos_s_300_pre":
_UpperCamelCase : Dict = torch.tensor(
[[-36.2220, -14.4385, -23.5457], [-35.6970, -14.7583, -21.3935], [-31.5939, -13.6042, -16.8049]] )
_UpperCamelCase : Tuple = torch.tensor(
[[0.7614, 0.2316, 0.4728], [0.7168, 0.4495, 0.3855], [0.4996, 0.1466, 0.9996]] )
elif yolos_name == "yolos_s_dWr":
_UpperCamelCase : Dict = torch.tensor(
[[-42.8668, -24.1049, -41.1690], [-34.7456, -14.1274, -24.9194], [-33.7898, -12.1946, -25.6495]] )
_UpperCamelCase : Optional[int] = torch.tensor(
[[0.5587, 0.2773, 0.0605], [0.5004, 0.3014, 0.9994], [0.4999, 0.1548, 0.9994]] )
elif yolos_name == "yolos_base":
_UpperCamelCase : Any = torch.tensor(
[[-40.6064, -24.3084, -32.6447], [-55.1990, -30.7719, -35.5877], [-51.4311, -33.3507, -35.6462]] )
_UpperCamelCase : Tuple = torch.tensor(
[[0.5555, 0.2794, 0.0655], [0.9049, 0.2664, 0.1894], [0.9183, 0.1984, 0.1635]] )
else:
raise ValueError(F'''Unknown yolos_name: {yolos_name}''' )
assert torch.allclose(logits[0, :3, :3] ,lowercase_ ,atol=1e-4 )
assert torch.allclose(pred_boxes[0, :3, :3] ,lowercase_ ,atol=1e-4 )
Path(lowercase_ ).mkdir(exist_ok=lowercase_ )
print(F'''Saving model {yolos_name} to {pytorch_dump_folder_path}''' )
model.save_pretrained(lowercase_ )
print(F'''Saving image processor to {pytorch_dump_folder_path}''' )
image_processor.save_pretrained(lowercase_ )
if push_to_hub:
_UpperCamelCase : int = {
"yolos_ti": "yolos-tiny",
"yolos_s_200_pre": "yolos-small",
"yolos_s_300_pre": "yolos-small-300",
"yolos_s_dWr": "yolos-small-dwr",
"yolos_base": "yolos-base",
}
print("Pushing to the hub..." )
_UpperCamelCase : List[Any] = model_mapping[yolos_name]
image_processor.push_to_hub(lowercase_ ,organization="hustvl" )
model.push_to_hub(lowercase_ ,organization="hustvl" )
if __name__ == "__main__":
lowerCamelCase__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--yolos_name",
default="yolos_s_200_pre",
type=str,
help=(
"Name of the YOLOS model you'd like to convert. Should be one of 'yolos_ti', 'yolos_s_200_pre',"
" 'yolos_s_300_pre', 'yolos_s_dWr', 'yolos_base'."
),
)
parser.add_argument(
"--checkpoint_path", default=None, type=str, help="Path to the original state dict (.pth file)."
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
)
parser.add_argument(
"--push_to_hub", action="store_true", help="Whether or not to push the converted model to the 🤗 hub."
)
lowerCamelCase__ = parser.parse_args()
convert_yolos_checkpoint(args.yolos_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub)
| 364 |
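The read_in_q_k_v helper above slices a fused timm-style qkv projection into separate query/key/value weights; the slicing on its own (the hidden size is just the yolos_ti value reused as an example):

```python
import torch

hidden_size = 192
qkv_weight = torch.randn(3 * hidden_size, hidden_size)  # fused [q; k; v] rows

q = qkv_weight[:hidden_size, :]
k = qkv_weight[hidden_size : 2 * hidden_size, :]
v = qkv_weight[-hidden_size:, :]

assert torch.equal(torch.cat([q, k, v], dim=0), qkv_weight)
```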
"""simple docstring"""
import json
import os
import unittest
from transformers import AutoTokenizer, GPTaTokenizer, GPTaTokenizerFast
from transformers.models.gpta.tokenization_gpta import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class __SCREAMING_SNAKE_CASE ( _UpperCamelCase , unittest.TestCase ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ :Optional[Any] = GPTaTokenizer
SCREAMING_SNAKE_CASE__ :Tuple = GPTaTokenizerFast
SCREAMING_SNAKE_CASE__ :Dict = True
SCREAMING_SNAKE_CASE__ :int = {"add_prefix_space": True}
SCREAMING_SNAKE_CASE__ :Optional[Any] = False
def __SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> Union[str, Any]:
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
_UpperCamelCase : List[str] = [
"l",
"o",
"w",
"e",
"r",
"s",
"t",
"i",
"d",
"n",
"\u0120",
"\u0120l",
"\u0120n",
"\u0120lo",
"\u0120low",
"er",
"\u0120lowest",
"\u0120newer",
"\u0120wider",
"<unk>",
"<|endoftext|>",
]
_UpperCamelCase : Tuple = dict(zip(__a , range(len(__a ) ) ) )
_UpperCamelCase : str = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""]
_UpperCamelCase : str = {"unk_token": "<unk>"}
_UpperCamelCase : Tuple = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] )
_UpperCamelCase : Optional[Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["merges_file"] )
with open(self.vocab_file , "w" , encoding="utf-8" ) as fp:
fp.write(json.dumps(__a ) + "\n" )
with open(self.merges_file , "w" , encoding="utf-8" ) as fp:
fp.write("\n".join(__a ) )
def __SCREAMING_SNAKE_CASE ( self : Any , **__a : Optional[int] ) -> Union[str, Any]:
kwargs.update(self.special_tokens_map )
return GPTaTokenizer.from_pretrained(self.tmpdirname , **__a )
def __SCREAMING_SNAKE_CASE ( self : Dict , **__a : Union[str, Any] ) -> int:
kwargs.update(self.special_tokens_map )
return GPTaTokenizerFast.from_pretrained(self.tmpdirname , **__a )
def __SCREAMING_SNAKE_CASE ( self : Dict , __a : Any ) -> Tuple:
_UpperCamelCase : List[Any] = "lower newer"
_UpperCamelCase : Union[str, Any] = "lower newer"
return input_text, output_text
def __SCREAMING_SNAKE_CASE ( self : List[str] ) -> Optional[Any]:
_UpperCamelCase : Dict = GPTaTokenizer(self.vocab_file , self.merges_file , **self.special_tokens_map )
_UpperCamelCase : Optional[Any] = "lower newer"
_UpperCamelCase : Optional[Any] = ["\u0120low", "er", "\u0120", "n", "e", "w", "er"]
_UpperCamelCase : Any = tokenizer.tokenize(__a , add_prefix_space=__a )
self.assertListEqual(__a , __a )
_UpperCamelCase : str = tokens + [tokenizer.unk_token]
_UpperCamelCase : str = [14, 15, 10, 9, 3, 2, 15, 19]
self.assertListEqual(tokenizer.convert_tokens_to_ids(__a ) , __a )
def __SCREAMING_SNAKE_CASE ( self : List[Any] ) -> Any:
if not self.test_rust_tokenizer:
return
_UpperCamelCase : Any = self.get_tokenizer()
_UpperCamelCase : List[str] = self.get_rust_tokenizer(add_prefix_space=__a )
_UpperCamelCase : Optional[Any] = "lower newer"
# Testing tokenization
_UpperCamelCase : str = tokenizer.tokenize(__a , add_prefix_space=__a )
_UpperCamelCase : List[str] = rust_tokenizer.tokenize(__a )
self.assertListEqual(__a , __a )
# Testing conversion to ids without special tokens
_UpperCamelCase : List[str] = tokenizer.encode(__a , add_special_tokens=__a , add_prefix_space=__a )
_UpperCamelCase : Optional[Any] = rust_tokenizer.encode(__a , add_special_tokens=__a )
self.assertListEqual(__a , __a )
# Testing conversion to ids with special tokens
_UpperCamelCase : Optional[int] = self.get_rust_tokenizer(add_prefix_space=__a )
_UpperCamelCase : List[Any] = tokenizer.encode(__a , add_prefix_space=__a )
_UpperCamelCase : List[str] = rust_tokenizer.encode(__a )
self.assertListEqual(__a , __a )
# Testing the unknown token
_UpperCamelCase : Optional[int] = tokens + [rust_tokenizer.unk_token]
_UpperCamelCase : int = [14, 15, 10, 9, 3, 2, 15, 19]
self.assertListEqual(rust_tokenizer.convert_tokens_to_ids(__a ) , __a )
def __SCREAMING_SNAKE_CASE ( self : int , *__a : int , **__a : List[Any] ) -> Union[str, Any]:
# It's very difficult to mix/test pretokenization with byte-level
# And get both GPT2 and Roberta to work at the same time (mostly an issue of adding a space before the string)
pass
def __SCREAMING_SNAKE_CASE ( self : Optional[Any] , __a : int=15 ) -> Union[str, Any]:
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F'''{tokenizer.__class__.__name__} ({pretrained_name})''' ):
_UpperCamelCase : str = self.rust_tokenizer_class.from_pretrained(__a , **__a )
# Simple input
_UpperCamelCase : Optional[int] = "This is a simple input"
_UpperCamelCase : List[str] = ["This is a simple input 1", "This is a simple input 2"]
_UpperCamelCase : Dict = ("This is a simple input", "This is a pair")
_UpperCamelCase : Any = [
("This is a simple input 1", "This is a simple input 2"),
("This is a simple pair 1", "This is a simple pair 2"),
]
# Simple input tests
self.assertRaises(__a , tokenizer_r.encode , __a , max_length=__a , padding="max_length" )
# Simple input
self.assertRaises(__a , tokenizer_r.encode_plus , __a , max_length=__a , padding="max_length" )
# Simple input
self.assertRaises(
__a , tokenizer_r.batch_encode_plus , __a , max_length=__a , padding="max_length" , )
# Pair input
self.assertRaises(__a , tokenizer_r.encode , __a , max_length=__a , padding="max_length" )
# Pair input
self.assertRaises(__a , tokenizer_r.encode_plus , __a , max_length=__a , padding="max_length" )
# Pair input
self.assertRaises(
__a , tokenizer_r.batch_encode_plus , __a , max_length=__a , padding="max_length" , )
def __SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> int:
_UpperCamelCase : Dict = GPTaTokenizer.from_pretrained(self.tmpdirname , pad_token="<pad>" )
# Simple input
_UpperCamelCase : Union[str, Any] = "This is a simple input"
_UpperCamelCase : Optional[Any] = ["This is a simple input looooooooong", "This is a simple input"]
_UpperCamelCase : str = ("This is a simple input", "This is a pair")
_UpperCamelCase : List[str] = [
("This is a simple input loooooong", "This is a simple input"),
("This is a simple pair loooooong", "This is a simple pair"),
]
_UpperCamelCase : Union[str, Any] = tokenizer.pad_token_id
_UpperCamelCase : str = tokenizer(__a , padding="max_length" , max_length=30 , return_tensors="np" )
_UpperCamelCase : Tuple = tokenizer(__a , padding=__a , truncate=__a , return_tensors="np" )
_UpperCamelCase : str = tokenizer(*__a , padding="max_length" , max_length=60 , return_tensors="np" )
_UpperCamelCase : Optional[int] = tokenizer(__a , padding=__a , truncate=__a , return_tensors="np" )
# s
# test single string max_length padding
self.assertEqual(out_s["input_ids"].shape[-1] , 30 )
self.assertTrue(pad_token_id in out_s["input_ids"] )
self.assertTrue(0 in out_s["attention_mask"] )
# s2
# test automatic padding
self.assertEqual(out_sa["input_ids"].shape[-1] , 33 )
# long slice doesn't have padding
self.assertFalse(pad_token_id in out_sa["input_ids"][0] )
self.assertFalse(0 in out_sa["attention_mask"][0] )
# short slice does have padding
self.assertTrue(pad_token_id in out_sa["input_ids"][1] )
self.assertTrue(0 in out_sa["attention_mask"][1] )
# p
# test single pair max_length padding
self.assertEqual(out_p["input_ids"].shape[-1] , 60 )
self.assertTrue(pad_token_id in out_p["input_ids"] )
self.assertTrue(0 in out_p["attention_mask"] )
# p2
# test automatic padding pair
self.assertEqual(out_pa["input_ids"].shape[-1] , 52 )
# long slice pair doesn't have padding
self.assertFalse(pad_token_id in out_pa["input_ids"][0] )
self.assertFalse(0 in out_pa["attention_mask"][0] )
# short slice pair does have padding
self.assertTrue(pad_token_id in out_pa["input_ids"][1] )
self.assertTrue(0 in out_pa["attention_mask"][1] )
def __SCREAMING_SNAKE_CASE ( self : Dict ) -> List[str]:
_UpperCamelCase : Any = "$$$"
_UpperCamelCase : Any = GPTaTokenizer.from_pretrained(self.tmpdirname , bos_token=__a , add_bos_token=__a )
_UpperCamelCase : int = "This is a simple input"
_UpperCamelCase : Tuple = ["This is a simple input 1", "This is a simple input 2"]
_UpperCamelCase : Union[str, Any] = tokenizer.bos_token_id
_UpperCamelCase : str = tokenizer(__a )
_UpperCamelCase : Optional[Any] = tokenizer(__a )
self.assertEqual(out_s.input_ids[0] , __a )
self.assertTrue(all(o[0] == bos_token_id for o in out_sa.input_ids ) )
_UpperCamelCase : Optional[Any] = tokenizer.decode(out_s.input_ids )
_UpperCamelCase : int = tokenizer.batch_decode(out_sa.input_ids )
self.assertEqual(decode_s.split()[0] , __a )
self.assertTrue(all(d.split()[0] == bos_token for d in decode_sa ) )
def __SCREAMING_SNAKE_CASE ( self : int ) -> str:
pass
def __SCREAMING_SNAKE_CASE ( self : List[str] ) -> Optional[Any]:
# TODO: change to self.get_tokenizers() when the fast version is implemented
_UpperCamelCase : Optional[Any] = [self.get_tokenizer(do_lower_case=__a , add_bos_token=__a )]
for tokenizer in tokenizers:
with self.subTest(F'''{tokenizer.__class__.__name__}''' ):
_UpperCamelCase : Tuple = "Encode this."
_UpperCamelCase : List[str] = "This one too please."
_UpperCamelCase : Optional[int] = tokenizer.encode(__a , add_special_tokens=__a )
encoded_sequence += tokenizer.encode(__a , add_special_tokens=__a )
_UpperCamelCase : int = tokenizer.encode_plus(
__a , __a , add_special_tokens=__a , return_special_tokens_mask=__a , )
_UpperCamelCase : str = encoded_sequence_dict["input_ids"]
_UpperCamelCase : Optional[int] = encoded_sequence_dict["special_tokens_mask"]
self.assertEqual(len(__a ) , len(__a ) )
_UpperCamelCase : Union[str, Any] = [
(x if not special_tokens_mask[i] else None) for i, x in enumerate(__a )
]
_UpperCamelCase : Union[str, Any] = [x for x in filtered_sequence if x is not None]
self.assertEqual(__a , __a )
@require_tokenizers
class __SCREAMING_SNAKE_CASE ( unittest.TestCase ):
'''simple docstring'''
def __SCREAMING_SNAKE_CASE ( self : int ) -> str:
# More context:
# https://huggingface.co/wjmcat/opt-350m-paddle/discussions/1
# https://huggingface.slack.com/archives/C01N44FJDHT/p1653511495183519
# https://github.com/huggingface/transformers/pull/17088#discussion_r871246439
_UpperCamelCase : Tuple = AutoTokenizer.from_pretrained("facebook/opt-350m" , from_slow=__a )
_UpperCamelCase : List[Any] = "A photo of a cat"
_UpperCamelCase : Any = tokenizer.encode(
__a , )
self.assertEqual(__a , [2, 250, 1345, 9, 10, 4758] )
tokenizer.save_pretrained("test_opt" )
_UpperCamelCase : str = AutoTokenizer.from_pretrained("./test_opt" )
_UpperCamelCase : Optional[Any] = tokenizer.encode(
__a , )
self.assertEqual(__a , [2, 250, 1345, 9, 10, 4758] )
def __SCREAMING_SNAKE_CASE ( self : Dict ) -> Optional[int]:
_UpperCamelCase : int = AutoTokenizer.from_pretrained("facebook/opt-350m" , use_slow=__a )
_UpperCamelCase : List[Any] = "A photo of a cat"
_UpperCamelCase : Union[str, Any] = tokenizer.encode(
__a , )
# Same as above
self.assertEqual(__a , [2, 250, 1345, 9, 10, 4758] )
@unittest.skip("This test is failing because of a bug in the fast tokenizer" )
def __SCREAMING_SNAKE_CASE ( self : Any ) -> Tuple:
_UpperCamelCase : Dict = AutoTokenizer.from_pretrained("facebook/opt-350m" , from_slow=__a )
_UpperCamelCase : List[str] = "bos"
_UpperCamelCase : Tuple = tokenizer.get_vocab()["bos"]
_UpperCamelCase : List[Any] = "A photo of a cat"
_UpperCamelCase : List[Any] = tokenizer.encode(
__a , )
# We changed the bos token
self.assertEqual(__a , [3_1957, 250, 1345, 9, 10, 4758] )
tokenizer.save_pretrained("./tok" )
_UpperCamelCase : Union[str, Any] = AutoTokenizer.from_pretrained("./tok" )
self.assertTrue(tokenizer.is_fast )
_UpperCamelCase : Tuple = tokenizer.encode(
__a , )
self.assertEqual(__a , [3_1957, 250, 1345, 9, 10, 4758] )
| 310 | 0 |
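The last test above filters special tokens out of a sequence via the returned special_tokens_mask; the filtering idiom by itself (ids and mask values are made up):

```python
input_ids           = [50256, 14, 15, 10, 50256, 9, 3]  # placeholder ids
special_tokens_mask = [1,     0,  0,  0,  1,     0, 0]

filtered = [t for t, special in zip(input_ids, special_tokens_mask) if not special]
print(filtered)  # [14, 15, 10, 9, 3]
```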
def merge_sort(collection: list) -> list:
    """Sort a list via top-down merge sort and return a new sorted list."""

    def merge(left: list, right: list) -> list:
        def _merge():
            while left and right:
                yield (left if left[0] <= right[0] else right).pop(0)
            yield from left
            yield from right

        return list(_merge())

    if len(collection) <= 1:
        return collection
    mid = len(collection) // 2
    return merge(merge_sort(collection[:mid]), merge_sort(collection[mid:]))
if __name__ == "__main__":
import doctest
doctest.testmod()
    user_input = input('Enter numbers separated by a comma:\n').strip()
    unsorted = [int(item) for item in user_input.split(',')]
print(*merge_sort(unsorted), sep=',')
| 107 |
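A quick check of the repaired merge_sort:

```python
print(merge_sort([5, 3, 8, 1, 9, 2]))  # [1, 2, 3, 5, 8, 9]
print(merge_sort([]))                  # []
```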
import numpy as np
def sigmoid(vector: np.array) -> np.array:
    """Elementwise logistic sigmoid."""
    return 1 / (1 + np.exp(-vector))


def gaussian_error_linear_unit(vector: np.array) -> np.array:
    """Sigmoid approximation of GELU: x * sigmoid(1.702 * x)."""
    return vector * sigmoid(1.702 * vector)
if __name__ == "__main__":
import doctest
doctest.testmod()
| 107 | 1 |
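The second function above is the sigmoid approximation of GELU; a small numeric comparison against the exact erf-based form (grid and tolerance chosen loosely):

```python
import numpy as np
from math import erf

xs = np.linspace(-3, 3, 61)
approx = xs / (1 + np.exp(-1.702 * xs))  # x * sigmoid(1.702 x)
exact = np.array([0.5 * x * (1 + erf(x / np.sqrt(2))) for x in xs])

print(np.max(np.abs(approx - exact)))  # small; roughly 0.02 or below
```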
import sys
N = (
"73167176531330624919225119674426574742355349194934"
"96983520312774506326239578318016984801869478851843"
"85861560789112949495459501737958331952853208805511"
"12540698747158523863050715693290963295227443043557"
"66896648950445244523161731856403098711121722383113"
"62229893423380308135336276614282806444486645238749"
"30358907296290491560440772390713810515859307960866"
"70172427121883998797908792274921901699720888093776"
"65727333001053367881220235421809751254540594752243"
"52584907711670556013604839586446706324415722155397"
"53697817977846174064955149290862569321978468622482"
"83972241375657056057490261407972968652414535100474"
"82166370484403199890008895243450658541227588666881"
"16427171479924442928230863465674813919123162824586"
"17866458359124566529476545682848912883142607690042"
"24219022671055626321111109370544217506941658960408"
"07198403850962455444362981230987879927244284909188"
"84580156166097919133875499200524063689912560717606"
"05886116467109405077541002256983155200055935729725"
"71636269561882670428252483600823257530420752963450"
)
def str_eval(s: str) -> int:
    """Return the product of the digits in the string s."""
    product = 1
    for digit in s:
        product *= int(digit)
    return product


def solution(n: str = N) -> int:
    """Find the greatest product of thirteen adjacent digits in n."""
    largest_product = -sys.maxsize - 1
    substr = n[:13]
    cur_index = 13
    while cur_index < len(n) - 13:
        if int(n[cur_index]) >= int(substr[0]):
            substr = substr[1:] + n[cur_index]
            cur_index += 1
        else:
            largest_product = max(largest_product, str_eval(substr))
            substr = n[cur_index : cur_index + 13]
            cur_index += 13
    return largest_product
if __name__ == "__main__":
print(f"""{solution() = }""")
| 363 |
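A sanity check of the repaired functions (the second value is the widely cited Project Euler #8 answer for thirteen adjacent digits):

```python
print(str_eval("9989"))  # 5832, i.e. 9 * 9 * 8 * 9
print(solution())        # 23514624000
```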
from random import randint
from tempfile import TemporaryFile
import numpy as np
def _in_place_quick_sort(a, start, end):
    count = 0
    if start < end:
        pivot = randint(start, end)
        temp = a[end]
        a[end] = a[pivot]
        a[pivot] = temp
        p, count = _in_place_partition(a, start, end)
        count += _in_place_quick_sort(a, start, p - 1)
        count += _in_place_quick_sort(a, p + 1, end)
    return count


def _in_place_partition(a, start, end):
    count = 0
    pivot = randint(start, end)
    temp = a[end]
    a[end] = a[pivot]
    a[pivot] = temp
    new_pivot_index = start - 1
    for index in range(start, end):
        count += 1
        if a[index] < a[end]:  # check if current val is less than pivot value
            new_pivot_index = new_pivot_index + 1
            temp = a[new_pivot_index]
            a[new_pivot_index] = a[index]
            a[index] = temp
    temp = a[new_pivot_index + 1]
    a[new_pivot_index + 1] = a[end]
    a[end] = temp
    return new_pivot_index + 1, count


outfile = TemporaryFile()
p = 100  # 100 elements are to be sorted
mu, sigma = 0, 1  # mean and standard deviation
X = np.random.normal(mu, sigma, p)
np.save(outfile, X)
print("The array is")
print(X)

outfile.seek(0)  # using the same array
M = np.load(outfile)
r = len(M) - 1
z = _in_place_quick_sort(M, 0, r)
print(
"No of Comparisons for 100 elements selected from a standard normal distribution"
"is :"
)
print(z)
| 66 | 0 |
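A small deterministic-input check of the repaired quick sort; the comparison count varies from run to run because the pivot is chosen at random:

```python
data = [5, 1, 4, 2, 8, 0, 2]
comparisons = _in_place_quick_sort(data, 0, len(data) - 1)
print(data)         # [0, 1, 2, 2, 4, 5, 8]
print(comparisons)  # varies with the random pivots
```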
'''simple docstring'''
import unittest
from queue import Empty
from threading import Thread
from transformers import AutoTokenizer, TextIteratorStreamer, TextStreamer, is_torch_available
from transformers.testing_utils import CaptureStdout, require_torch, torch_device
from ..test_modeling_common import ids_tensor
if is_torch_available():
import torch
from transformers import AutoModelForCausalLM
@require_torch
class UpperCamelCase__ ( unittest.TestCase ):
"""simple docstring"""
def lowerCamelCase_ ( self : int ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Union[str, Any] = AutoTokenizer.from_pretrained("""hf-internal-testing/tiny-random-gpt2""" )
SCREAMING_SNAKE_CASE : List[str] = AutoModelForCausalLM.from_pretrained("""hf-internal-testing/tiny-random-gpt2""" ).to(lowerCamelCase_ )
SCREAMING_SNAKE_CASE : str = -1
SCREAMING_SNAKE_CASE : int = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Optional[Any] = model.generate(lowerCamelCase_ , max_new_tokens=10 , do_sample=lowerCamelCase_ )
SCREAMING_SNAKE_CASE : str = tokenizer.decode(greedy_ids[0] )
with CaptureStdout() as cs:
SCREAMING_SNAKE_CASE : Union[str, Any] = TextStreamer(lowerCamelCase_ )
model.generate(lowerCamelCase_ , max_new_tokens=10 , do_sample=lowerCamelCase_ , streamer=lowerCamelCase_ )
# The greedy text should be printed to stdout, except for the final "\n" in the streamer
SCREAMING_SNAKE_CASE : Dict = cs.out[:-1]
self.assertEqual(lowerCamelCase_ , lowerCamelCase_ )
def lowerCamelCase_ ( self : List[Any] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Any = AutoTokenizer.from_pretrained("""hf-internal-testing/tiny-random-gpt2""" )
SCREAMING_SNAKE_CASE : Tuple = AutoModelForCausalLM.from_pretrained("""hf-internal-testing/tiny-random-gpt2""" ).to(lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Union[str, Any] = -1
SCREAMING_SNAKE_CASE : str = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Dict = model.generate(lowerCamelCase_ , max_new_tokens=10 , do_sample=lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Any = tokenizer.decode(greedy_ids[0] )
SCREAMING_SNAKE_CASE : Tuple = TextIteratorStreamer(lowerCamelCase_ )
SCREAMING_SNAKE_CASE : str = {"""input_ids""": input_ids, """max_new_tokens""": 10, """do_sample""": False, """streamer""": streamer}
SCREAMING_SNAKE_CASE : Optional[Any] = Thread(target=model.generate , kwargs=lowerCamelCase_ )
thread.start()
SCREAMING_SNAKE_CASE : int = """"""
for new_text in streamer:
streamer_text += new_text
self.assertEqual(lowerCamelCase_ , lowerCamelCase_ )
def lowerCamelCase_ ( self : List[str] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : int = AutoTokenizer.from_pretrained("""hf-internal-testing/tiny-random-gpt2""" )
SCREAMING_SNAKE_CASE : Optional[int] = AutoModelForCausalLM.from_pretrained("""hf-internal-testing/tiny-random-gpt2""" ).to(lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Dict = -1
SCREAMING_SNAKE_CASE : Tuple = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Tuple = model.generate(lowerCamelCase_ , max_new_tokens=10 , do_sample=lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Dict = greedy_ids[:, input_ids.shape[1] :]
SCREAMING_SNAKE_CASE : Optional[int] = tokenizer.decode(new_greedy_ids[0] )
with CaptureStdout() as cs:
SCREAMING_SNAKE_CASE : Any = TextStreamer(lowerCamelCase_ , skip_prompt=lowerCamelCase_ )
model.generate(lowerCamelCase_ , max_new_tokens=10 , do_sample=lowerCamelCase_ , streamer=lowerCamelCase_ )
# The greedy text should be printed to stdout, except for the final "\n" in the streamer
SCREAMING_SNAKE_CASE : Union[str, Any] = cs.out[:-1]
self.assertEqual(lowerCamelCase_ , lowerCamelCase_ )
def lowerCamelCase_ ( self : Optional[int] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[Any] = AutoTokenizer.from_pretrained("""distilgpt2""" )
SCREAMING_SNAKE_CASE : Optional[int] = AutoModelForCausalLM.from_pretrained("""distilgpt2""" ).to(lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Union[str, Any] = -1
SCREAMING_SNAKE_CASE : Any = torch.ones((1, 5) , device=lowerCamelCase_ ).long() * model.config.bos_token_id
with CaptureStdout() as cs:
SCREAMING_SNAKE_CASE : Dict = TextStreamer(lowerCamelCase_ , skip_special_tokens=lowerCamelCase_ )
model.generate(lowerCamelCase_ , max_new_tokens=1 , do_sample=lowerCamelCase_ , streamer=lowerCamelCase_ )
# The prompt contains a special token, so the streamer should not print it. As such, the output text, when
# re-tokenized, must only contain one token
SCREAMING_SNAKE_CASE : List[str] = cs.out[:-1] # Remove the final "\n"
SCREAMING_SNAKE_CASE : List[Any] = tokenizer(lowerCamelCase_ , return_tensors="""pt""" )
self.assertEqual(streamer_text_tokenized.input_ids.shape , (1, 1) )
def lowerCamelCase_ ( self : Tuple ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[Any] = AutoTokenizer.from_pretrained("""hf-internal-testing/tiny-random-gpt2""" )
SCREAMING_SNAKE_CASE : Any = AutoModelForCausalLM.from_pretrained("""hf-internal-testing/tiny-random-gpt2""" ).to(lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Union[str, Any] = -1
SCREAMING_SNAKE_CASE : Optional[Any] = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Union[str, Any] = TextIteratorStreamer(lowerCamelCase_ , timeout=0.001 )
SCREAMING_SNAKE_CASE : List[str] = {"""input_ids""": input_ids, """max_new_tokens""": 10, """do_sample""": False, """streamer""": streamer}
SCREAMING_SNAKE_CASE : Any = Thread(target=model.generate , kwargs=lowerCamelCase_ )
thread.start()
# The streamer will timeout after 0.001 seconds, so an exception will be raised
with self.assertRaises(lowerCamelCase_ ):
SCREAMING_SNAKE_CASE : Optional[Any] = """"""
for new_text in streamer:
streamer_text += new_text
| 323 |
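The iterator-streamer pattern tested above is the usual way to stream generated text while generate() runs on a worker thread; a condensed sketch using the same tiny checkpoint as the tests:

```python
from threading import Thread
from transformers import AutoModelForCausalLM, AutoTokenizer, TextIteratorStreamer

tok = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2")
model = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2")

inputs = tok("Hello", return_tensors="pt")
streamer = TextIteratorStreamer(tok)

Thread(target=model.generate, kwargs={**inputs, "max_new_tokens": 10, "streamer": streamer}).start()
for piece in streamer:  # yields decoded text chunks as they are produced
    print(piece, end="")
```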
'''simple docstring'''
from typing import Optional
from .. import Features, NamedSplit
from ..packaged_modules.text.text import Text
from ..utils.typing import NestedDataStructureLike, PathLike
from .abc import AbstractDatasetReader
class TextDatasetReader ( AbstractDatasetReader ):
    """simple docstring"""

    def __init__( self : Dict , path_or_paths : NestedDataStructureLike[PathLike] , split : Optional[NamedSplit] = None , features : Optional[Features] = None , cache_dir : str = None , keep_in_memory : bool = False , streaming : bool = False , num_proc : Optional[int] = None , **kwargs , ):
        '''simple docstring'''
        super().__init__(
            path_or_paths , split=split , features=features , cache_dir=cache_dir , keep_in_memory=keep_in_memory , streaming=streaming , num_proc=num_proc , **kwargs , )
        path_or_paths = path_or_paths if isinstance(path_or_paths , dict ) else {self.split: path_or_paths}
        self.builder = Text(
            cache_dir=cache_dir , data_files=path_or_paths , features=features , **kwargs , )
    def read( self : Dict ):
        '''simple docstring'''
        # Build iterable dataset
        if self.streaming:
            dataset = self.builder.as_streaming_dataset(split=self.split )
        # Build regular (map-style) dataset
        else:
            download_config = None
            download_mode = None
            verification_mode = None
            base_path = None
            self.builder.download_and_prepare(
                download_config=download_config , download_mode=download_mode , verification_mode=verification_mode , base_path=base_path , num_proc=self.num_proc , )
            dataset = self.builder.as_dataset(
                split=self.split , verification_mode=verification_mode , in_memory=self.keep_in_memory )
        return dataset
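# A hedged usage sketch for the reader above: end users normally reach this class
# through `datasets.load_dataset`, which forwards to the Text builder. The file
# path here is an illustrative assumption.
if __name__ == "__main__":
    from datasets import load_dataset

    dataset = load_dataset("text", data_files={"train": "my_corpus.txt"}, split="train")
    print(dataset[0]["text"])  # each line of the file becomes one example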
| 323 | 1 |
'''simple docstring'''
from __future__ import annotations
import unittest
from transformers import MobileBertConfig, is_tf_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TF_MODEL_FOR_PRETRAINING_MAPPING,
TFMobileBertForMaskedLM,
TFMobileBertForMultipleChoice,
TFMobileBertForNextSentencePrediction,
TFMobileBertForPreTraining,
TFMobileBertForQuestionAnswering,
TFMobileBertForSequenceClassification,
TFMobileBertForTokenClassification,
TFMobileBertModel,
)
@require_tf
class TFMobileBertModelTest ( TFModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
"""simple docstring"""
    all_model_classes = (
(
TFMobileBertModel,
TFMobileBertForMaskedLM,
TFMobileBertForNextSentencePrediction,
TFMobileBertForPreTraining,
TFMobileBertForQuestionAnswering,
TFMobileBertForSequenceClassification,
TFMobileBertForTokenClassification,
TFMobileBertForMultipleChoice,
)
if is_tf_available()
else ()
)
    pipeline_model_mapping = (
{
"""feature-extraction""": TFMobileBertModel,
"""fill-mask""": TFMobileBertForMaskedLM,
"""question-answering""": TFMobileBertForQuestionAnswering,
"""text-classification""": TFMobileBertForSequenceClassification,
"""token-classification""": TFMobileBertForTokenClassification,
"""zero-shot""": TFMobileBertForSequenceClassification,
}
if is_tf_available()
else {}
)
    test_head_masking = False
    test_onnx = False
    def _prepare_for_class( self , inputs_dict , model_class , return_labels=False ) -> List[Any]:
        inputs_dict = super()._prepare_for_class(inputs_dict , model_class , return_labels=return_labels )
        if return_labels:
            if model_class in get_values(TF_MODEL_FOR_PRETRAINING_MAPPING ):
                inputs_dict["""next_sentence_label"""] = tf.zeros(self.model_tester.batch_size , dtype=tf.int32 )
        return inputs_dict
class TFMobileBertModelTester:
"""simple docstring"""
    def __init__( self , parent , batch_size=13 , seq_length=7 , is_training=True , use_input_mask=True , use_token_type_ids=True , use_labels=True , vocab_size=99 , hidden_size=32 , embedding_size=32 , num_hidden_layers=2 , num_attention_heads=4 , intermediate_size=37 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=512 , type_vocab_size=16 , type_sequence_label_size=2 , initializer_range=0.02 , num_labels=3 , num_choices=4 , scope=None , ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
        self.embedding_size = embedding_size
    def prepare_config_and_inputs( self ) -> int:
        input_ids = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length] )
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size] , self.type_sequence_label_size )
            token_labels = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
            choice_labels = ids_tensor([self.batch_size] , self.num_choices )
        config = MobileBertConfig(
            vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , embedding_size=self.embedding_size , )
        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    def create_and_check_mobilebert_model( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        model = TFMobileBertModel(config=config )
        inputs_dict = {"""input_ids""": input_ids, """attention_mask""": input_mask, """token_type_ids""": token_type_ids}
        result = model(inputs_dict )
        inputs = [input_ids, input_mask]
        result = model(inputs )
        result = model(input_ids )
        self.parent.assertEqual(
            result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
        self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )

    def create_and_check_mobilebert_for_masked_lm( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        model = TFMobileBertForMaskedLM(config=config )
        inputs_dict = {"""input_ids""": input_ids, """attention_mask""": input_mask, """token_type_ids""": token_type_ids}
        result = model(inputs_dict )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )

    def create_and_check_mobilebert_for_next_sequence_prediction( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        model = TFMobileBertForNextSentencePrediction(config=config )
        inputs_dict = {"""input_ids""": input_ids, """attention_mask""": input_mask, """token_type_ids""": token_type_ids}
        result = model(inputs_dict )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, 2) )

    def create_and_check_mobilebert_for_pretraining( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        model = TFMobileBertForPreTraining(config=config )
        inputs_dict = {"""input_ids""": input_ids, """attention_mask""": input_mask, """token_type_ids""": token_type_ids}
        result = model(inputs_dict )
        self.parent.assertEqual(
            result.prediction_logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
        self.parent.assertEqual(result.seq_relationship_logits.shape , (self.batch_size, 2) )

    def create_and_check_mobilebert_for_sequence_classification( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        config.num_labels = self.num_labels
        model = TFMobileBertForSequenceClassification(config=config )
        inputs_dict = {"""input_ids""": input_ids, """attention_mask""": input_mask, """token_type_ids""": token_type_ids}
        result = model(inputs_dict )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )

    def create_and_check_mobilebert_for_multiple_choice( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        config.num_choices = self.num_choices
        model = TFMobileBertForMultipleChoice(config=config )
        multiple_choice_inputs_ids = tf.tile(tf.expand_dims(input_ids , 1 ) , (1, self.num_choices, 1) )
        multiple_choice_input_mask = tf.tile(tf.expand_dims(input_mask , 1 ) , (1, self.num_choices, 1) )
        multiple_choice_token_type_ids = tf.tile(tf.expand_dims(token_type_ids , 1 ) , (1, self.num_choices, 1) )
        inputs_dict = {
            """input_ids""": multiple_choice_inputs_ids,
            """attention_mask""": multiple_choice_input_mask,
            """token_type_ids""": multiple_choice_token_type_ids,
        }
        result = model(inputs_dict )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )

    def create_and_check_mobilebert_for_token_classification( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        config.num_labels = self.num_labels
        model = TFMobileBertForTokenClassification(config=config )
        inputs_dict = {"""input_ids""": input_ids, """attention_mask""": input_mask, """token_type_ids""": token_type_ids}
        result = model(inputs_dict )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )

    def create_and_check_mobilebert_for_question_answering( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        model = TFMobileBertForQuestionAnswering(config=config )
        inputs_dict = {"""input_ids""": input_ids, """attention_mask""": input_mask, """token_type_ids""": token_type_ids}
        result = model(inputs_dict )
        self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
        self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
    def prepare_config_and_inputs_for_common( self ):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"""input_ids""": input_ids, """token_type_ids""": token_type_ids, """attention_mask""": input_mask}
        return config, inputs_dict
    def setUp( self ):
        self.model_tester = TFMobileBertModelTester(self )
        self.config_tester = ConfigTester(self , config_class=MobileBertConfig , hidden_size=37 )

    def test_config( self ):
        self.config_tester.run_common_tests()

    def test_mobilebert_model( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_model(*config_and_inputs )

    def test_for_masked_lm( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_masked_lm(*config_and_inputs )

    def test_for_multiple_choice( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_multiple_choice(*config_and_inputs )

    def test_for_next_sequence_prediction( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_next_sequence_prediction(*config_and_inputs )

    def test_for_pretraining( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_pretraining(*config_and_inputs )

    def test_for_question_answering( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_question_answering(*config_and_inputs )

    def test_for_sequence_classification( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_sequence_classification(*config_and_inputs )

    def test_for_token_classification( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_token_classification(*config_and_inputs )

    @slow
    def test_model_from_pretrained( self ):
        for model_name in ["google/mobilebert-uncased"]:
            model = TFMobileBertModel.from_pretrained(model_name )
            self.assertIsNotNone(model )
@require_tf
class TFMobileBertModelIntegrationTest ( unittest.TestCase ):
"""simple docstring"""
@slow
    def test_inference_masked_lm( self ):
        model = TFMobileBertForPreTraining.from_pretrained('''google/mobilebert-uncased''' )
        input_ids = tf.constant([[0, 1, 2, 3, 4, 5]] )
        output = model(input_ids )[0]
        expected_shape = [1, 6, 30522]
        self.assertEqual(output.shape , expected_shape )
        expected_slice = tf.constant(
            [
                [
                    [-4.5919547, -9.248295, -9.645256],
                    [-6.7306175, -6.440284, -6.6052837],
                    [-7.2743506, -6.7847915, -6.024673],
                ]
            ] )
        tf.debugging.assert_near(output[:, :3, :3] , expected_slice , atol=1e-4 ) | 362 |
'''simple docstring'''
from abc import ABC, abstractmethod
from typing import List, Optional
class Constraint(ABC ):
"""simple docstring"""
def __init__( self : Optional[Any] ) -> int:
# test for the above condition
self.test()
    def test( self ):
        counter = 0
        completed = False
        while not completed:
            if counter == 1:
                self.reset()
            advance = self.advance()
            if not self.does_advance(advance ):
                raise Exception(
                    '''Custom Constraint is not defined correctly. self.does_advance(self.advance()) must be true.''' )
            stepped, completed, reset = self.update(advance )
            counter += 1
            if counter > 10000:
                raise Exception('''update() does not fulfill the constraint.''' )
        if self.remaining() != 0:
            raise Exception('''Custom Constraint is not defined correctly.''' )
    @abstractmethod
    def advance( self ):
        raise NotImplementedError(
            f'{self.__class__} is an abstract class. Only classes inheriting this class can be called.' )

    @abstractmethod
    def does_advance( self , token_id : int ):
        raise NotImplementedError(
            f'{self.__class__} is an abstract class. Only classes inheriting this class can be called.' )

    @abstractmethod
    def update( self , token_id : int ):
        raise NotImplementedError(
            f'{self.__class__} is an abstract class. Only classes inheriting this class can be called.' )

    @abstractmethod
    def reset( self ):
        raise NotImplementedError(
            f'{self.__class__} is an abstract class. Only classes inheriting this class can be called.' )

    @abstractmethod
    def remaining( self ):
        raise NotImplementedError(
            f'{self.__class__} is an abstract class. Only classes inheriting this class can be called.' )

    @abstractmethod
    def copy( self , stateful=False ):
        raise NotImplementedError(
            f'{self.__class__} is an abstract class. Only classes inheriting this class can be called.' )
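# A hedged sketch of the interface that Constraint.test() checks: a minimal,
# illustrative subclass (not shipped with the library) that forces one fixed
# token. Like PhrasalConstraint below, it calls super(Constraint, self).__init__()
# so that Constraint.__init__ (and therefore self.test()) is skipped during
# construction.
class SingleTokenConstraint(Constraint):
    def __init__(self, token_id: int):
        super(Constraint, self).__init__()
        self.token_id = token_id
        self.seqlen = 1
        self.completed = False

    def advance(self):
        return None if self.completed else self.token_id

    def does_advance(self, token_id: int):
        return not self.completed and token_id == self.token_id

    def update(self, token_id: int):
        stepped = self.does_advance(token_id)
        if stepped:
            self.completed = True
        else:
            self.reset()
        return stepped, self.completed, not stepped

    def reset(self):
        self.completed = False

    def remaining(self):
        return 0 if self.completed else 1

    def copy(self, stateful=False):
        new_constraint = SingleTokenConstraint(self.token_id)
        if stateful:
            new_constraint.completed = self.completed
        return new_constraint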
class PhrasalConstraint(Constraint ):
    """simple docstring"""

    def __init__( self , token_ids : List[int] ):
        super(Constraint , self ).__init__()
        if not isinstance(token_ids , list ) or len(token_ids ) == 0:
            raise ValueError(f'`token_ids` has to be a non-empty list, but is {token_ids}.' )
        if any((not isinstance(token_id , int ) or token_id < 0) for token_id in token_ids ):
            raise ValueError(f'Each list in `token_ids` has to be a list of positive integers, but is {token_ids}.' )
        self.token_ids = token_ids
        self.seqlen = len(self.token_ids )
        self.fulfilled_idx = -1 # the index of the currently fulfilled step
        self.completed = False
    def advance( self ):
        if self.completed:
            return None
        return self.token_ids[self.fulfilled_idx + 1]

    def does_advance( self , token_id : int ):
        if not isinstance(token_id , int ):
            raise ValueError(f'`token_id` has to be an `int`, but is {token_id} of type {type(token_id )}' )
        if self.completed:
            return False
        return token_id == self.token_ids[self.fulfilled_idx + 1]

    def update( self , token_id : int ):
        if not isinstance(token_id , int ):
            raise ValueError(f'`token_id` has to be an `int`, but is {token_id} of type {type(token_id )}' )
        stepped = False
        completed = False
        reset = False
        if self.does_advance(token_id ):
            self.fulfilled_idx += 1
            stepped = True
            if self.fulfilled_idx == (self.seqlen - 1):
                completed = True
            self.completed = completed
        else:
            # failed to make progress.
            reset = True
            self.reset()
        return stepped, completed, reset

    def reset( self ):
        self.completed = False
        self.fulfilled_idx = 0

    def remaining( self ):
        return self.seqlen - (self.fulfilled_idx + 1)

    def copy( self , stateful=False ):
        new_constraint = PhrasalConstraint(self.token_ids )
        if stateful:
            new_constraint.seqlen = self.seqlen
            new_constraint.fulfilled_idx = self.fulfilled_idx
            new_constraint.completed = self.completed
        return new_constraint
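# A hedged sketch (illustrative token ids) of driving the constraint by hand, the
# way the constrained beam-search machinery does; in practice these objects are
# passed to `model.generate(constraints=[...])`.
if __name__ == "__main__":
    demo_constraint = PhrasalConstraint([5, 9, 3])
    for demo_token in [5, 9, 3]:
        stepped, completed, reset = demo_constraint.update(demo_token)
        print(demo_token, stepped, completed, reset, demo_constraint.remaining())
    # prints: 5 True False False 2 / 9 True False False 1 / 3 True True False 0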
class DisjunctiveTrie:
    """simple docstring"""

    def __init__( self , nested_token_ids : List[List[int]] , no_subsets=True ):
        self.max_height = max([len(one ) for one in nested_token_ids] )
        root = {}
        for token_ids in nested_token_ids:
            level = root
            for tidx, token_id in enumerate(token_ids ):
                if token_id not in level:
                    level[token_id] = {}
                level = level[token_id]
        if no_subsets and self.has_subsets(root , nested_token_ids ):
            raise ValueError(
                '''Each list in `nested_token_ids` can\'t be a complete subset of another list, but is'''
                f' {nested_token_ids}.' )
        self.trie = root

    def next_tokens( self , current_seq ):
        start = self.trie
        for current_token in current_seq:
            start = start[current_token]
        next_tokens = list(start.keys() )
        return next_tokens

    def reached_leaf( self , current_seq ):
        next_tokens = self.next_tokens(current_seq )
        return len(next_tokens ) == 0

    def count_leaves( self , root ):
        next_nodes = list(root.values() )
        if len(next_nodes ) == 0:
            return 1
        else:
            return sum([self.count_leaves(nn ) for nn in next_nodes] )

    def has_subsets( self , trie , nested_token_ids ):
        leaf_count = self.count_leaves(trie )
        return len(nested_token_ids ) != leaf_count
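# A hedged sketch of the trie above (illustrative ids): branches share prefixes,
# and next_tokens() returns the possible continuations of a partial sequence.
if __name__ == "__main__":
    demo_trie = DisjunctiveTrie([[1, 2, 3], [1, 2, 4], [1, 5]])
    print(demo_trie.next_tokens([1]))      # [2, 5]
    print(demo_trie.next_tokens([1, 2]))   # [3, 4]
    print(demo_trie.reached_leaf([1, 5]))  # True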
class DisjunctiveConstraint(Constraint ):
    """simple docstring"""

    def __init__( self , nested_token_ids : List[List[int]] ):
        super(Constraint , self ).__init__()
        if not isinstance(nested_token_ids , list ) or len(nested_token_ids ) == 0:
            raise ValueError(f'`nested_token_ids` has to be a non-empty list, but is {nested_token_ids}.' )
        if any(not isinstance(token_ids , list ) for token_ids in nested_token_ids ):
            raise ValueError(f'`nested_token_ids` has to be a list of lists, but is {nested_token_ids}.' )
        if any(
            any((not isinstance(token_id , int ) or token_id < 0) for token_id in token_ids )
            for token_ids in nested_token_ids ):
            raise ValueError(
                f'Each list in `nested_token_ids` has to be a list of positive integers, but is {nested_token_ids}.' )
        self.trie = DisjunctiveTrie(nested_token_ids )
        self.token_ids = nested_token_ids
        self.seqlen = self.trie.max_height
        self.current_seq = []
        self.completed = False
    def advance( self ):
        token_list = self.trie.next_tokens(self.current_seq )
        if len(token_list ) == 0:
            return None
        else:
            return token_list

    def does_advance( self , token_id : int ):
        if not isinstance(token_id , int ):
            raise ValueError(f'`token_id` is supposed to be type `int`, but is {token_id} of type {type(token_id )}' )
        next_tokens = self.trie.next_tokens(self.current_seq )
        return token_id in next_tokens

    def update( self , token_id : int ):
        if not isinstance(token_id , int ):
            raise ValueError(f'`token_id` is supposed to be type `int`, but is {token_id} of type {type(token_id )}' )
        stepped = False
        completed = False
        reset = False
        if self.does_advance(token_id ):
            self.current_seq.append(token_id )
            stepped = True
        else:
            reset = True
            self.reset()
        completed = self.trie.reached_leaf(self.current_seq )
        self.completed = completed
        return stepped, completed, reset

    def reset( self ):
        self.completed = False
        self.current_seq = []

    def remaining( self ):
        if self.completed:
            # since this can be completed without reaching max height
            return 0
        else:
            return self.seqlen - len(self.current_seq )

    def copy( self , stateful=False ):
        new_constraint = DisjunctiveConstraint(self.token_ids )
        if stateful:
            new_constraint.seqlen = self.seqlen
            new_constraint.current_seq = self.current_seq
            new_constraint.completed = self.completed
        return new_constraint
class ConstraintListState:
    """simple docstring"""

    def __init__( self , constraints : List[Constraint] ):
        self.constraints = constraints
        # max # of steps required to fulfill a given constraint
        self.max_seqlen = max([c.seqlen for c in constraints] )
        self.n_constraints = len(constraints )
        self.completed = False
        self.init_state()

    def init_state( self ):
        self.complete_constraints = []
        self.inprogress_constraint = None
        self.pending_constraints = [constraint.copy(stateful=False ) for constraint in self.constraints]
    def get_bank( self ):
        add = 0
        if self.inprogress_constraint:
            # extra points for having a constraint mid-fulfilled
            add += self.max_seqlen - self.inprogress_constraint.remaining()
        return (len(self.complete_constraints ) * self.max_seqlen) + add

    def advance( self ):
        token_list = []
        if self.inprogress_constraint is None:
            for constraint in self.pending_constraints: # "pending" == "unfulfilled yet"
                advance = constraint.advance()
                if isinstance(advance , int ):
                    token_list.append(advance )
                elif isinstance(advance , list ):
                    token_list.extend(advance )
        else:
            advance = self.inprogress_constraint.advance()
            if isinstance(advance , int ):
                token_list.append(advance )
            elif isinstance(advance , list ):
                token_list.extend(advance )
        if len(token_list ) == 0:
            return None
        else:
            return token_list
    def reset( self , token_ids : Optional[List[int]] ):
        self.init_state()
        if token_ids is not None:
            for token in token_ids:
                # completes or steps **one** constraint
                complete, stepped = self.add(token )
                # the entire list of constraints are fulfilled
                if self.completed:
                    break
    def add( self , token_id : int ):
        if not isinstance(token_id , int ):
            raise ValueError(f'`token_id` should be an `int`, but is `{token_id}`.' )
        complete, stepped = False, False
        if self.completed:
            complete = True
            stepped = False
            return complete, stepped
        if self.inprogress_constraint is not None:
            # In the middle of fulfilling a constraint. If the `token_id` *does* makes an incremental progress to current
            # job, simply update the state
            stepped, complete, reset = self.inprogress_constraint.update(token_id )
            if reset:
                # 1. If the next token breaks the progress, then we must restart.
                #     e.g. constraint = "I love pies" and sequence so far is "I love" but `token_id` == "books".
                #     But that doesn't mean we self.init_state(), since we only reset the state for this particular
                #     constraint, not the full list of constraints.
                self.pending_constraints.append(self.inprogress_constraint.copy(stateful=False ) )
                self.inprogress_constraint = None
            if complete:
                # 2. If the next token completes the constraint, move it to completed list, set
                #     inprogress to None. If there are no pending constraints either, then this full list of constraints
                #     is complete.
                self.complete_constraints.append(self.inprogress_constraint )
                self.inprogress_constraint = None
                if len(self.pending_constraints ) == 0:
                    # we're done!
                    self.completed = True
        else:
            # Not in the middle of fulfilling a constraint. So does this `token_id` helps us step towards any of our list
            # of constraints?
            for cidx, pending_constraint in enumerate(self.pending_constraints ):
                if pending_constraint.does_advance(token_id ):
                    stepped, complete, reset = pending_constraint.update(token_id )
                    if not stepped:
                        raise Exception(
                            '''`constraint.update(token_id)` is not yielding incremental progress, '''
                            '''even though `constraint.does_advance(token_id)` is true.''' )
                    if complete:
                        self.complete_constraints.append(pending_constraint )
                        self.inprogress_constraint = None
                    if not complete and stepped:
                        self.inprogress_constraint = pending_constraint
                    if complete or stepped:
                        # If we made any progress at all, then it's at least not a "pending constraint".
                        self.pending_constraints = (
                            self.pending_constraints[:cidx] + self.pending_constraints[cidx + 1 :]
                        )
                        if len(self.pending_constraints ) == 0 and self.inprogress_constraint is None:
                            # If there's no longer any pending after this and no inprogress either, then we must be
                            # complete.
                            self.completed = True
                        break # prevent accidentally stepping through multiple constraints with just one token.
        return complete, stepped
    def copy( self , stateful=True ):
        new_state = ConstraintListState(self.constraints ) # we never actually mutate the self.constraints objects
        # throughout this process, so they remain in their initialization state.
        if stateful:
            new_state.complete_constraints = [
                constraint.copy(stateful=True ) for constraint in self.complete_constraints
            ]
            if self.inprogress_constraint is not None:
                new_state.inprogress_constraint = self.inprogress_constraint.copy(stateful=True )
            new_state.pending_constraints = [constraint.copy() for constraint in self.pending_constraints]
        return new_state | 240 | 0 |
from typing import List, Optional, Union
import numpy as np
import PIL
import torch
from PIL import Image
from ...models import UNetaDConditionModel, VQModel
from ...pipelines import DiffusionPipeline
from ...pipelines.pipeline_utils import ImagePipelineOutput
from ...schedulers import DDPMScheduler
from ...utils import (
is_accelerate_available,
is_accelerate_version,
logging,
randn_tensor,
replace_example_docstring,
)
logger = logging.get_logger(__name__) # pylint: disable=invalid-name
EXAMPLE_DOC_STRING = """
Examples:
```py
>>> from diffusers import KandinskyV22Img2ImgPipeline, KandinskyV22PriorPipeline
>>> from diffusers.utils import load_image
>>> import torch
>>> pipe_prior = KandinskyV22PriorPipeline.from_pretrained(
... \"kandinsky-community/kandinsky-2-2-prior\", torch_dtype=torch.float16
... )
>>> pipe_prior.to(\"cuda\")
>>> prompt = \"A red cartoon frog, 4k\"
>>> image_emb, zero_image_emb = pipe_prior(prompt, return_dict=False)
>>> pipe = KandinskyV22Img2ImgPipeline.from_pretrained(
... \"kandinsky-community/kandinsky-2-2-decoder\", torch_dtype=torch.float16
... )
>>> pipe.to(\"cuda\")
>>> init_image = load_image(
... \"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main\"
... \"/kandinsky/frog.png\"
... )
>>> image = pipe(
... image=init_image,
... image_embeds=image_emb,
... negative_image_embeds=zero_image_emb,
... height=768,
... width=768,
... num_inference_steps=100,
... strength=0.2,
... ).images
>>> image[0].save(\"red_frog.png\")
```
"""
def downscale_height_and_width( height , width , scale_factor=8 ):
    """simple docstring"""
    new_height = height // scale_factor**2
    if height % scale_factor**2 != 0:
        new_height += 1
    new_width = width // scale_factor**2
    if width % scale_factor**2 != 0:
        new_width += 1
    return new_height * scale_factor, new_width * scale_factor
def prepare_image( pil_image , w=512 , h=512 ):
    """simple docstring"""
    pil_image = pil_image.resize((w, h) , resample=Image.BICUBIC , reducing_gap=1 )
    arr = np.array(pil_image.convert("""RGB""" ) )
    arr = arr.astype(np.float32 ) / 127.5 - 1
    arr = np.transpose(arr , [2, 0, 1] )
    image = torch.from_numpy(arr ).unsqueeze(0 )
    return image
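# A hedged demo of the two helpers above (illustrative sizes): the first maps a
# pixel resolution to the latent grid, rounding up to whole blocks, and the
# second converts a PIL image to a [-1, 1] CHW float tensor with a batch dim.
if __name__ == "__main__":
    print(downscale_height_and_width(500, 767, scale_factor=8))  # (64, 96)
    demo_image = Image.new("RGB", (300, 200), color=(255, 0, 0))
    demo_tensor = prepare_image(demo_image, w=512, h=512)
    print(demo_tensor.shape)  # torch.Size([1, 3, 512, 512]), values in [-1, 1]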
class KandinskyV22Img2ImgPipeline ( DiffusionPipeline ):
    def __init__( self , unet , scheduler , movq , ):
        """simple docstring"""
        super().__init__()
        self.register_modules(
            unet=unet , scheduler=scheduler , movq=movq , )
        self.movq_scale_factor = 2 ** (len(self.movq.config.block_out_channels ) - 1)
    def get_timesteps( self , num_inference_steps , strength , device ):
        """simple docstring"""
        init_timestep = min(int(num_inference_steps * strength ) , num_inference_steps )
        t_start = max(num_inference_steps - init_timestep , 0 )
        timesteps = self.scheduler.timesteps[t_start:]
        return timesteps, num_inference_steps - t_start
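    # Worked example for get_timesteps (illustrative numbers): with
    # num_inference_steps=100 and strength=0.3, init_timestep = min(30, 100) = 30
    # and t_start = 70, so only the last 30 scheduler steps run -- a higher
    # strength re-noises more of the input image and re-synthesizes more of it.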
    def prepare_latents( self , image , timestep , batch_size , num_images_per_prompt , dtype , device , generator=None ):
        """simple docstring"""
        if not isinstance(image , (torch.Tensor, PIL.Image.Image, list) ):
            raise ValueError(
                f'`image` has to be of type `torch.Tensor`, `PIL.Image.Image` or list but is {type(image )}' )
        image = image.to(device=device , dtype=dtype )
        batch_size = batch_size * num_images_per_prompt
        if image.shape[1] == 4:
            init_latents = image
        else:
            if isinstance(generator , list ) and len(generator ) != batch_size:
                raise ValueError(
                    f'You have passed a list of generators of length {len(generator )}, but requested an effective batch'
                    f' size of {batch_size}. Make sure the batch size matches the length of the generators.' )
            elif isinstance(generator , list ):
                init_latents = [
                    self.movq.encode(image[i : i + 1] ).latent_dist.sample(generator[i] ) for i in range(batch_size )
                ]
                init_latents = torch.cat(init_latents , dim=0 )
            else:
                init_latents = self.movq.encode(image ).latent_dist.sample(generator )
            init_latents = self.movq.config.scaling_factor * init_latents
        init_latents = torch.cat([init_latents] , dim=0 )
        shape = init_latents.shape
        noise = randn_tensor(shape , generator=generator , device=device , dtype=dtype )
        # get latents
        init_latents = self.scheduler.add_noise(init_latents , noise , timestep )
        latents = init_latents
        return latents
    def enable_sequential_cpu_offload( self , gpu_id=0 ):
        """simple docstring"""
        if is_accelerate_available():
            from accelerate import cpu_offload
        else:
            raise ImportError("""Please install accelerate via `pip install accelerate`""" )
        device = torch.device(f'cuda:{gpu_id}' )
        models = [
            self.unet,
            self.movq,
        ]
        for cpu_offloaded_model in models:
            if cpu_offloaded_model is not None:
                cpu_offload(cpu_offloaded_model , device )
    def enable_model_cpu_offload( self , gpu_id=0 ):
        """simple docstring"""
        if is_accelerate_available() and is_accelerate_version(""">=""" , """0.17.0.dev0""" ):
            from accelerate import cpu_offload_with_hook
        else:
            raise ImportError("""`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher.""" )
        device = torch.device(f'cuda:{gpu_id}' )
        if self.device.type != "cpu":
            self.to("""cpu""" , silence_dtype_warnings=True )
            torch.cuda.empty_cache() # otherwise we don't see the memory savings (but they probably exist)
        hook = None
        for cpu_offloaded_model in [self.unet, self.movq]:
            _, hook = cpu_offload_with_hook(cpu_offloaded_model , device , prev_module_hook=hook )
        # We'll offload the last model manually.
        self.final_offload_hook = hook
@property
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
    def _execution_device( self ):
        """simple docstring"""
        if not hasattr(self.unet , """_hf_hook""" ):
            return self.device
        for module in self.unet.modules():
            if (
                hasattr(module , """_hf_hook""" )
                and hasattr(module._hf_hook , """execution_device""" )
                and module._hf_hook.execution_device is not None
            ):
                return torch.device(module._hf_hook.execution_device )
        return self.device
@torch.no_grad()
    @replace_example_docstring(EXAMPLE_DOC_STRING )
    def __call__( self , image_embeds , image , negative_image_embeds , height = 512 , width = 512 , num_inference_steps = 100 , guidance_scale = 4.0 , strength = 0.3 , num_images_per_prompt = 1 , generator = None , output_type = "pil" , return_dict = True , ):
        """simple docstring"""
        device = self._execution_device
        do_classifier_free_guidance = guidance_scale > 1.0
        if isinstance(image_embeds , list ):
            image_embeds = torch.cat(image_embeds , dim=0 )
        batch_size = image_embeds.shape[0]
        if isinstance(negative_image_embeds , list ):
            negative_image_embeds = torch.cat(negative_image_embeds , dim=0 )
        if do_classifier_free_guidance:
            image_embeds = image_embeds.repeat_interleave(num_images_per_prompt , dim=0 )
            negative_image_embeds = negative_image_embeds.repeat_interleave(num_images_per_prompt , dim=0 )
            image_embeds = torch.cat([negative_image_embeds, image_embeds] , dim=0 ).to(dtype=self.unet.dtype , device=device )
        if not isinstance(image , list ):
            image = [image]
        if not all(isinstance(i , (PIL.Image.Image, torch.Tensor) ) for i in image ):
            raise ValueError(
                f'Input is in incorrect format: {[type(i ) for i in image]}. Currently, we only support PIL image and pytorch tensor' )
        image = torch.cat([prepare_image(i , width , height ) for i in image] , dim=0 )
        image = image.to(dtype=image_embeds.dtype , device=device )
        latents = self.movq.encode(image )["""latents"""]
        latents = latents.repeat_interleave(num_images_per_prompt , dim=0 )
        self.scheduler.set_timesteps(num_inference_steps , device=device )
        timesteps, num_inference_steps = self.get_timesteps(num_inference_steps , strength , device )
        latent_timestep = timesteps[:1].repeat(batch_size * num_images_per_prompt )
        height, width = downscale_height_and_width(height , width , self.movq_scale_factor )
        latents = self.prepare_latents(
            latents , latent_timestep , batch_size , num_images_per_prompt , image_embeds.dtype , device , generator )
        for i, t in enumerate(self.progress_bar(timesteps ) ):
            # expand the latents if we are doing classifier free guidance
            latent_model_input = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
            added_cond_kwargs = {"""image_embeds""": image_embeds}
            noise_pred = self.unet(
                sample=latent_model_input , timestep=t , encoder_hidden_states=None , added_cond_kwargs=added_cond_kwargs , return_dict=False , )[0]
            if do_classifier_free_guidance:
                noise_pred, variance_pred = noise_pred.split(latents.shape[1] , dim=1 )
                noise_pred_uncond, noise_pred_text = noise_pred.chunk(2 )
                _, variance_pred_text = variance_pred.chunk(2 )
                noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
                noise_pred = torch.cat([noise_pred, variance_pred_text] , dim=1 )
            if not (
                hasattr(self.scheduler.config , """variance_type""" )
                and self.scheduler.config.variance_type in ["learned", "learned_range"]
            ):
                noise_pred, _ = noise_pred.split(latents.shape[1] , dim=1 )
            # compute the previous noisy sample x_t -> x_t-1
            latents = self.scheduler.step(
                noise_pred , t , latents , generator=generator , )[0]
        # post-processing
        image = self.movq.decode(latents , force_not_quantize=True )["""sample"""]
        if output_type not in ["pt", "np", "pil"]:
            raise ValueError(f'Only the output types `pt`, `pil` and `np` are supported not output_type={output_type}' )
        if output_type in ["np", "pil"]:
            image = image * 0.5 + 0.5
            image = image.clamp(0 , 1 )
            image = image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
        if output_type == "pil":
            image = self.numpy_to_pil(image )
        if not return_dict:
            return (image,)
        return ImagePipelineOutput(images=image )
| 82 |
"""simple docstring"""
import shutil
import tempfile
import unittest
from transformers import SPIECE_UNDERLINE, BatchEncoding, MBartTokenizer, MBartTokenizerFast, is_torch_available
from transformers.testing_utils import (
get_tests_dir,
nested_simplify,
require_sentencepiece,
require_tokenizers,
require_torch,
)
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir('''fixtures/test_sentencepiece.model''')
if is_torch_available():
    from transformers.models.mbart.modeling_mbart import shift_tokens_right
EN_CODE = 250004
RO_CODE = 250020
@require_sentencepiece
@require_tokenizers
class MBartTokenizationTest ( TokenizerTesterMixin , unittest.TestCase):
    tokenizer_class = MBartTokenizer
    rust_tokenizer_class = MBartTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True

    def setUp( self ) -> int:
        '''simple docstring'''
        super().setUp()
        # We have a SentencePiece fixture for testing
        tokenizer = MBartTokenizer(SAMPLE_VOCAB , keep_accents=True )
        tokenizer.save_pretrained(self.tmpdirname )

    def test_full_tokenizer( self ) -> str:
        '''simple docstring'''
        tokenizer = MBartTokenizer(SAMPLE_VOCAB , keep_accents=True )
        tokens = tokenizer.tokenize("""This is a test""" )
        self.assertListEqual(tokens , ["""▁This""", """▁is""", """▁a""", """▁t""", """est"""] )
        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens ) , [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]] , )
        tokens = tokenizer.tokenize("""I was born in 92000, and this is falsé.""" )
        self.assertListEqual(
            tokens , [
SPIECE_UNDERLINE + """I""",
SPIECE_UNDERLINE + """was""",
SPIECE_UNDERLINE + """b""",
"""or""",
"""n""",
SPIECE_UNDERLINE + """in""",
SPIECE_UNDERLINE + """""",
"""9""",
"""2""",
"""0""",
"""0""",
"""0""",
""",""",
SPIECE_UNDERLINE + """and""",
SPIECE_UNDERLINE + """this""",
SPIECE_UNDERLINE + """is""",
SPIECE_UNDERLINE + """f""",
"""al""",
"""s""",
"""é""",
""".""",
] , )
        ids = tokenizer.convert_tokens_to_ids(tokens )
        self.assertListEqual(
            ids , [
value + tokenizer.fairseq_offset
for value in [8, 21, 84, 55, 24, 19, 7, 2, 6_02, 3_47, 3_47, 3_47, 3, 12, 66, 46, 72, 80, 6, 2, 4]
# ^ unk: 2 + 1 = 3 unk: 2 + 1 = 3 ^
] , )
        back_tokens = tokenizer.convert_ids_to_tokens(ids )
        self.assertListEqual(
            back_tokens , [
SPIECE_UNDERLINE + """I""",
SPIECE_UNDERLINE + """was""",
SPIECE_UNDERLINE + """b""",
"""or""",
"""n""",
SPIECE_UNDERLINE + """in""",
SPIECE_UNDERLINE + """""",
"""<unk>""",
"""2""",
"""0""",
"""0""",
"""0""",
""",""",
SPIECE_UNDERLINE + """and""",
SPIECE_UNDERLINE + """this""",
SPIECE_UNDERLINE + """is""",
SPIECE_UNDERLINE + """f""",
"""al""",
"""s""",
"""<unk>""",
""".""",
] , )
    def test_save_pretrained( self ) -> Optional[Any]:
        '''simple docstring'''
        if not self.test_slow_tokenizer:
            # as we don't have a slow version, we can't compare the outputs between slow and fast versions
            return
        self.tokenizers_list[0] = (self.rust_tokenizer_class, """hf-internal-testing/tiny-random-mbart""", {})
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f'''{tokenizer.__class__.__name__} ({pretrained_name})''' ):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name , **kwargs )
                tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name , **kwargs )
                tmpdirname2 = tempfile.mkdtemp()
                tokenizer_r_files = tokenizer_r.save_pretrained(tmpdirname2 )
                tokenizer_p_files = tokenizer_p.save_pretrained(tmpdirname2 )
                # Checks it save with the same files + the tokenizer.json file for the fast one
                self.assertTrue(any("""tokenizer.json""" in f for f in tokenizer_r_files ) )
                tokenizer_r_files = tuple(f for f in tokenizer_r_files if """tokenizer.json""" not in f )
                self.assertSequenceEqual(tokenizer_r_files , tokenizer_p_files )
                # Checks everything loads correctly in the same way
                tokenizer_rp = tokenizer_r.from_pretrained(tmpdirname2 )
                tokenizer_pp = tokenizer_p.from_pretrained(tmpdirname2 )
                # Check special tokens are set accordingly on Rust and Python
                for key in tokenizer_pp.special_tokens_map:
                    self.assertTrue(hasattr(tokenizer_rp , key ) )
                    # self.assertEqual(getattr(tokenizer_rp, key), getattr(tokenizer_pp, key))
                    # self.assertEqual(getattr(tokenizer_rp, key + "_id"), getattr(tokenizer_pp, key + "_id"))
                shutil.rmtree(tmpdirname2 )
                # Save tokenizer rust, legacy_format=True
                tmpdirname2 = tempfile.mkdtemp()
                tokenizer_r_files = tokenizer_r.save_pretrained(tmpdirname2 , legacy_format=True )
                tokenizer_p_files = tokenizer_p.save_pretrained(tmpdirname2 )
                # Checks it save with the same files
                self.assertSequenceEqual(tokenizer_r_files , tokenizer_p_files )
                # Checks everything loads correctly in the same way
                tokenizer_rp = tokenizer_r.from_pretrained(tmpdirname2 )
                tokenizer_pp = tokenizer_p.from_pretrained(tmpdirname2 )
                # Check special tokens are set accordingly on Rust and Python
                for key in tokenizer_pp.special_tokens_map:
                    self.assertTrue(hasattr(tokenizer_rp , key ) )
                shutil.rmtree(tmpdirname2 )
                # Save tokenizer rust, legacy_format=False
                tmpdirname2 = tempfile.mkdtemp()
                tokenizer_r_files = tokenizer_r.save_pretrained(tmpdirname2 , legacy_format=False )
                tokenizer_p_files = tokenizer_p.save_pretrained(tmpdirname2 )
                # Checks it saved the tokenizer.json file
                self.assertTrue(any("""tokenizer.json""" in f for f in tokenizer_r_files ) )
                # Checks everything loads correctly in the same way
                tokenizer_rp = tokenizer_r.from_pretrained(tmpdirname2 )
                tokenizer_pp = tokenizer_p.from_pretrained(tmpdirname2 )
                # Check special tokens are set accordingly on Rust and Python
                for key in tokenizer_pp.special_tokens_map:
                    self.assertTrue(hasattr(tokenizer_rp , key ) )
                shutil.rmtree(tmpdirname2 )
@require_torch
@require_sentencepiece
@require_tokenizers
class _UpperCAmelCase ( unittest.TestCase):
    checkpoint_name = """facebook/mbart-large-en-ro"""
    src_text = [
        """ UN Chief Says There Is No Military Solution in Syria""",
        """ Secretary-General Ban Ki-moon says his response to Russia's stepped up military support for Syria is that \"there is no military solution\" to the nearly five-year conflict and more weapons will only worsen the violence and misery for millions of people.""",
    ]
    tgt_text = [
        """Şeful ONU declară că nu există o soluţie militară în Siria""",
        """Secretarul General Ban Ki-moon declară că răspunsul său la intensificarea sprijinului militar al Rusiei"""
        """ pentru Siria este că \"nu există o soluţie militară\" la conflictul de aproape cinci ani şi că noi arme nu vor"""
        """ face decât să înrăutăţească violenţele şi mizeria pentru milioane de oameni.""",
    ]
    expected_src_tokens = [8274, 127873, 25916, 7, 8622, 2071, 438, 67485, 53, 187895, 23, 51712, 2, EN_CODE]

    @classmethod
    def setUpClass( cls ) -> Optional[Any]:
        '''simple docstring'''
        cls.tokenizer: MBartTokenizer = MBartTokenizer.from_pretrained(
            cls.checkpoint_name , src_lang="""en_XX""" , tgt_lang="""ro_RO""" )
        cls.pad_token_id = 1
        return cls
    def test_language_codes( self ) -> Union[str, Any]:
        '''simple docstring'''
        self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["""ar_AR"""] , 250001 )
        self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["""en_EN"""] , 250004 )
        self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["""ro_RO"""] , 250020 )

    def test_enro_tokenizer_batch_encode_plus( self ) -> Dict:
        '''simple docstring'''
        ids = self.tokenizer.batch_encode_plus(self.src_text ).input_ids[0]
        self.assertListEqual(self.expected_src_tokens , ids )

    def test_enro_tokenizer_decode_ignores_language_codes( self ) -> str:
        '''simple docstring'''
        self.assertIn(RO_CODE , self.tokenizer.all_special_ids )
        generated_ids = [RO_CODE, 884, 9019, 96, 9, 916, 86792, 36, 18743, 15596, 5, 2]
        result = self.tokenizer.decode(generated_ids , skip_special_tokens=True )
        expected_romanian = self.tokenizer.decode(generated_ids[1:] , skip_special_tokens=True )
        self.assertEqual(result , expected_romanian )
        self.assertNotIn(self.tokenizer.eos_token , result )

    def test_enro_tokenizer_truncation( self ) -> Dict:
        '''simple docstring'''
        src_text = ["""this is gunna be a long sentence """ * 20]
        assert isinstance(src_text[0] , str )
        desired_max_length = 10
        ids = self.tokenizer(src_text , max_length=desired_max_length , truncation=True ).input_ids[0]
        self.assertEqual(ids[-2] , 2 )
        self.assertEqual(ids[-1] , EN_CODE )
        self.assertEqual(len(ids ) , desired_max_length )

    def test_mask_token( self ) -> int:
        '''simple docstring'''
        self.assertListEqual(self.tokenizer.convert_tokens_to_ids(["""<mask>""", """ar_AR"""] ) , [250026, 250001] )

    def test_special_tokens_unaffacted_by_save_load( self ) -> Optional[int]:
        '''simple docstring'''
        tmpdirname = tempfile.mkdtemp()
        original_special_tokens = self.tokenizer.fairseq_tokens_to_ids
        self.tokenizer.save_pretrained(tmpdirname )
        new_tok = MBartTokenizer.from_pretrained(tmpdirname )
        self.assertDictEqual(new_tok.fairseq_tokens_to_ids , original_special_tokens )

    @require_torch
    def test_batch_fairseq_parity( self ) -> Union[str, Any]:
        '''simple docstring'''
        batch = self.tokenizer(self.src_text , text_target=self.tgt_text , padding=True , return_tensors="""pt""" )
        batch["""decoder_input_ids"""] = shift_tokens_right(batch["""labels"""] , self.tokenizer.pad_token_id )
        # fairseq batch: https://gist.github.com/sshleifer/cba08bc2109361a74ac3760a7e30e4f4
        assert batch.input_ids[1][-2:].tolist() == [2, EN_CODE]
        assert batch.decoder_input_ids[1][0].tolist() == RO_CODE
        assert batch.decoder_input_ids[1][-1] == 2
        assert batch.labels[1][-2:].tolist() == [2, RO_CODE]

    @require_torch
    def test_enro_tokenizer_prepare_batch( self ) -> List[Any]:
        '''simple docstring'''
        batch = self.tokenizer(
            self.src_text , text_target=self.tgt_text , padding=True , truncation=True , max_length=len(self.expected_src_tokens ) , return_tensors="""pt""" , )
        batch["""decoder_input_ids"""] = shift_tokens_right(batch["""labels"""] , self.tokenizer.pad_token_id )
        self.assertIsInstance(batch , BatchEncoding )
        self.assertEqual((2, 14) , batch.input_ids.shape )
        self.assertEqual((2, 14) , batch.attention_mask.shape )
        result = batch.input_ids.tolist()[0]
        self.assertListEqual(self.expected_src_tokens , result )
        self.assertEqual(2 , batch.decoder_input_ids[0, -1] ) # EOS
        # Test that special tokens are reset
        self.assertEqual(self.tokenizer.prefix_tokens , [] )
        self.assertEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id, EN_CODE] )

    def test_seq2seq_max_length( self ) -> Union[str, Any]:
        '''simple docstring'''
        batch = self.tokenizer(self.src_text , padding=True , truncation=True , max_length=3 , return_tensors="""pt""" )
        targets = self.tokenizer(
            text_target=self.tgt_text , padding=True , truncation=True , max_length=10 , return_tensors="""pt""" )
        labels = targets["""input_ids"""]
        batch["""decoder_input_ids"""] = shift_tokens_right(labels , self.tokenizer.pad_token_id )
        self.assertEqual(batch.input_ids.shape[1] , 3 )
        self.assertEqual(batch.decoder_input_ids.shape[1] , 10 )

    @require_torch
    def test_tokenizer_translation( self ) -> str:
        '''simple docstring'''
        inputs = self.tokenizer._build_translation_inputs(
            """A test""" , return_tensors="""pt""" , src_lang="""en_XX""" , tgt_lang="""ar_AR""" )
        self.assertEqual(
            nested_simplify(inputs ) , {
                # A, test, EOS, en_XX
                """input_ids""": [[62, 3034, 2, 250004]],
                """attention_mask""": [[1, 1, 1, 1]],
                # ar_AR
                """forced_bos_token_id""": 250001,
            } , )
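# A hedged end-to-end sketch of the behaviour tested above, used for actual
# translation; it mirrors the model card example for facebook/mbart-large-en-ro.
if __name__ == "__main__":
    from transformers import MBartForConditionalGeneration

    demo_tokenizer = MBartTokenizer.from_pretrained("facebook/mbart-large-en-ro", src_lang="en_XX", tgt_lang="ro_RO")
    demo_model = MBartForConditionalGeneration.from_pretrained("facebook/mbart-large-en-ro")
    demo_batch = demo_tokenizer(["UN Chief Says There Is No Military Solution in Syria"], return_tensors="pt")
    demo_ids = demo_model.generate(**demo_batch, decoder_start_token_id=demo_tokenizer.lang_code_to_id["ro_RO"])
    print(demo_tokenizer.batch_decode(demo_ids, skip_special_tokens=True))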
| 246 | 0 |
'''simple docstring'''
from __future__ import annotations
class Matrix:
    def __init__( self , rows : list[list[int | float]] ) -> None:
        '''simple docstring'''
        error = TypeError(
            'Matrices must be formed from a list of zero or more lists containing at '
            'least one and the same number of values, each of which must be of type '
            'int or float.' )
        if len(rows ) != 0:
            cols = len(rows[0] )
            if cols == 0:
                raise error
            for row in rows:
                if len(row ) != cols:
                    raise error
                for value in row:
                    if not isinstance(value , (int, float) ):
                        raise error
            self.rows = rows
        else:
            self.rows = []
    def columns( self ) -> list[list[int | float]]:
        '''simple docstring'''
        return [[row[i] for row in self.rows] for i in range(len(self.rows[0] ) )]

    @property
    def num_rows( self ) -> int:
        '''simple docstring'''
        return len(self.rows )

    @property
    def num_columns( self ) -> int:
        '''simple docstring'''
        return len(self.rows[0] )

    @property
    def order( self ) -> tuple[int, int]:
        '''simple docstring'''
        return (self.num_rows, self.num_columns)

    @property
    def is_square( self ) -> bool:
        '''simple docstring'''
        return self.order[0] == self.order[1]
    def identity( self ) -> Matrix:
        '''simple docstring'''
        values = [
            [0 if column_num != row_num else 1 for column_num in range(self.num_rows )]
            for row_num in range(self.num_rows )
        ]
        return Matrix(values )
    def determinant( self ) -> int:
'''simple docstring'''
if not self.is_square:
return 0
if self.order == (0, 0):
return 1
if self.order == (1, 1):
return int(self.rows[0][0] )
if self.order == (2, 2):
return int(
(self.rows[0][0] * self.rows[1][1])
- (self.rows[0][1] * self.rows[1][0]) )
else:
return sum(
self.rows[0][column] * self.cofactors().rows[0][column]
for column in range(self.num_columns ) )
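    # Worked example (illustrative values): for [[1, 2], [3, 4]] the 2x2 branch
    # returns (1 * 4) - (2 * 3) = -2; larger square matrices fall through to a
    # Laplace expansion along the first row using the cofactors() matrix.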
    def is_invertable( self ) -> bool:
        '''simple docstring'''
        return bool(self.determinant() )
    def get_minor( self , row : int , column : int ) -> int:
        '''simple docstring'''
        values = [
            [
                self.rows[other_row][other_column]
                for other_column in range(self.num_columns )
                if other_column != column
            ]
            for other_row in range(self.num_rows )
            if other_row != row
        ]
        return Matrix(values ).determinant()

    def get_cofactor( self , row : int , column : int ) -> int:
        '''simple docstring'''
        if (row + column) % 2 == 0:
            return self.get_minor(row , column )
        return -1 * self.get_minor(row , column )
    def minors( self ) -> Matrix:
        '''simple docstring'''
        return Matrix(
            [
                [self.get_minor(row , column ) for column in range(self.num_columns )]
                for row in range(self.num_rows )
            ] )
    def cofactors( self ) -> Matrix:
'''simple docstring'''
return Matrix(
[
[
self.minors().rows[row][column]
if (row + column) % 2 == 0
else self.minors().rows[row][column] * -1
for column in range(self.minors().num_columns )
]
for row in range(self.minors().num_rows )
] )
    def adjugate( self ) -> Matrix:
        '''simple docstring'''
        values = [
            [self.cofactors().rows[column][row] for column in range(self.num_columns )]
            for row in range(self.num_rows )
        ]
        return Matrix(values )

    def inverse( self ) -> Matrix:
        '''simple docstring'''
        determinant = self.determinant()
        if not determinant:
            raise TypeError('Only matrices with a non-zero determinant have an inverse' )
        return self.adjugate() * (1 / determinant)
def __repr__( self : Union[str, Any] ) -> Union[str, Any]:
'''simple docstring'''
return str(self.rows )
def __str__( self : str ) -> Union[str, Any]:
'''simple docstring'''
if self.num_rows == 0:
return "[]"
if self.num_rows == 1:
return "[[" + ". ".join(str(self.rows[0] ) ) + "]]"
return (
"["
+ "\n ".join(
[
                    '[' + '. '.join([str(value ) for value in row] ) + '.]'
for row in self.rows
] )
+ "]"
)
    def add_row( self , row : list[int | float] , position : int | None = None ) -> None:
        '''simple docstring'''
        type_error = TypeError('Row must be a list containing all ints and/or floats' )
        if not isinstance(row , list ):
            raise type_error
        for value in row:
            if not isinstance(value , (int, float) ):
                raise type_error
        if len(row ) != self.num_columns:
            raise ValueError(
                'Row must be equal in length to the other rows in the matrix' )
        if position is None:
            self.rows.append(row )
        else:
            self.rows = self.rows[0:position] + [row] + self.rows[position:]
    def add_column( self , column : list[int | float] , position : int | None = None ) -> None:
        '''simple docstring'''
        type_error = TypeError(
            'Column must be a list containing all ints and/or floats' )
        if not isinstance(column , list ):
            raise type_error
        for value in column:
            if not isinstance(value , (int, float) ):
                raise type_error
        if len(column ) != self.num_rows:
            raise ValueError(
                'Column must be equal in length to the other columns in the matrix' )
        if position is None:
            self.rows = [self.rows[i] + [column[i]] for i in range(self.num_rows )]
        else:
            self.rows = [
                self.rows[i][0:position] + [column[i]] + self.rows[i][position:]
                for i in range(self.num_rows )
            ]
    def __eq__( self , other : object ) -> bool:
        '''simple docstring'''
        if not isinstance(other , Matrix ):
            return NotImplemented
        return self.rows == other.rows
def __ne__( self : Dict , __a : Optional[Any] ) -> Optional[int]:
'''simple docstring'''
return not self == other
def __neg__( self : Dict ) -> Optional[Any]:
'''simple docstring'''
return self * -1
def __add__( self : Tuple , __a : int ) -> Union[str, Any]:
'''simple docstring'''
if self.order != other.order:
raise ValueError('Addition requires matrices of the same order' )
return Matrix(
[
[self.rows[i][j] + other.rows[i][j] for j in range(self.num_columns )]
for i in range(self.num_rows )
] )
def __sub__( self : Dict , __a : Union[str, Any] ) -> Optional[Any]:
'''simple docstring'''
if self.order != other.order:
raise ValueError('Subtraction requires matrices of the same order' )
return Matrix(
[
[self.rows[i][j] - other.rows[i][j] for j in range(self.num_columns )]
for i in range(self.num_rows )
] )
    def __mul__( self , other : Matrix | int | float ) -> Matrix:
        '''simple docstring'''
        if isinstance(other , (int, float) ):
            return Matrix(
                [[int(element * other ) for element in row] for row in self.rows] )
        elif isinstance(other , Matrix ):
            if self.num_columns != other.num_rows:
                raise ValueError(
                    'The number of columns in the first matrix must '
                    'be equal to the number of rows in the second' )
            return Matrix(
                [
                    [Matrix.dot_product(row , column ) for column in other.columns()]
                    for row in self.rows
                ] )
        else:
            raise TypeError(
                'A Matrix can only be multiplied by an int, float, or another matrix' )
    def __pow__( self , other : int ) -> Matrix:
        '''simple docstring'''
        if not isinstance(other , int ):
            raise TypeError('A Matrix can only be raised to the power of an int' )
        if not self.is_square:
            raise ValueError('Only square matrices can be raised to a power' )
        if other == 0:
            return self.identity()
        if other < 0:
            if self.is_invertable():
                return self.inverse() ** (-other)
            raise ValueError(
                'Only invertable matrices can be raised to a negative power' )
        result = self
        for _ in range(other - 1 ):
            result *= self
        return result
@classmethod
def A_ ( cls : List[Any] , __a : Optional[Any] , __a : Optional[Any] ) -> Dict:
'''simple docstring'''
return sum(row[i] * column[i] for i in range(len(__lowercase ) ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
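    # A minimal usage sketch (assumption: the Matrix constructor takes a list
    # of rows, as the doctests elsewhere in this module do):
    example = Matrix([[1, 2], [3, 4]])
    print(example + example)  # elementwise addition
    print(example * example)  # matrix multiplication via Matrix.dot_product
    print(example**2)  # same result as example * example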
| 358 |
'''simple docstring'''
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
from transformers import BertTokenizerFast
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES, BertTokenizer
from transformers.testing_utils import require_tokenizers, require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import VisionTextDualEncoderProcessor, ViTImageProcessor
@require_tokenizers
@require_vision
class VisionTextDualEncoderProcessorTest(unittest.TestCase):
    def setUp(self):
        self.tmpdirname = tempfile.mkdtemp()

        # fmt: off
        vocab_tokens = ["[UNK]", "[CLS]", "[SEP]", "[PAD]", "[MASK]", "want", "##want", "##ed", "wa", "un", "runn", "##ing", ",", "low", "lowest"]
        # fmt: on
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))

        image_processor_map = {
            "do_resize": True,
            "size": {"height": 18, "width": 18},
            "do_normalize": True,
            "image_mean": [0.5, 0.5, 0.5],
            "image_std": [0.5, 0.5, 0.5],
        }
        self.image_processor_file = os.path.join(self.tmpdirname, IMAGE_PROCESSOR_NAME)
        with open(self.image_processor_file, "w", encoding="utf-8") as fp:
            json.dump(image_processor_map, fp)
    def get_tokenizer(self, **kwargs):
        return BertTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_image_processor(self, **kwargs):
        return ViTImageProcessor.from_pretrained(self.tmpdirname, **kwargs)

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def prepare_image_inputs(self):
        """Prepare a list of PIL images from random uint8 arrays."""
        image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)]
        image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]
        return image_inputs
    def test_save_load_pretrained_default(self):
        tokenizer = self.get_tokenizer()
        image_processor = self.get_image_processor()

        processor = VisionTextDualEncoderProcessor(tokenizer=tokenizer, image_processor=image_processor)
        processor.save_pretrained(self.tmpdirname)
        processor = VisionTextDualEncoderProcessor.from_pretrained(self.tmpdirname)

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer.get_vocab())
        self.assertIsInstance(processor.tokenizer, (BertTokenizer, BertTokenizerFast))

        self.assertEqual(processor.image_processor.to_json_string(), image_processor.to_json_string())
        self.assertIsInstance(processor.image_processor, ViTImageProcessor)

    def test_save_load_pretrained_additional_features(self):
        processor = VisionTextDualEncoderProcessor(
            tokenizer=self.get_tokenizer(), image_processor=self.get_image_processor()
        )
        processor.save_pretrained(self.tmpdirname)

        tokenizer_add_kwargs = self.get_tokenizer(bos_token="(BOS)", eos_token="(EOS)")
        image_processor_add_kwargs = self.get_image_processor(do_normalize=False, padding_value=1.0)

        processor = VisionTextDualEncoderProcessor.from_pretrained(
            self.tmpdirname, bos_token="(BOS)", eos_token="(EOS)", do_normalize=False, padding_value=1.0
        )

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
        self.assertIsInstance(processor.tokenizer, (BertTokenizer, BertTokenizerFast))

        self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.image_processor, ViTImageProcessor)

    def test_image_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = VisionTextDualEncoderProcessor(tokenizer=tokenizer, image_processor=image_processor)

        image_input = self.prepare_image_inputs()

        input_feat_extract = image_processor(image_input, return_tensors="np")
        input_processor = processor(images=image_input, return_tensors="np")

        for key in input_feat_extract.keys():
            self.assertAlmostEqual(input_feat_extract[key].sum(), input_processor[key].sum(), delta=1e-2)

    def test_tokenizer(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = VisionTextDualEncoderProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"

        encoded_processor = processor(text=input_str)
        encoded_tok = tokenizer(input_str)

        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key], encoded_processor[key])

    def test_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = VisionTextDualEncoderProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"
        image_input = self.prepare_image_inputs()

        inputs = processor(text=input_str, images=image_input)

        self.assertListEqual(list(inputs.keys()), ["input_ids", "token_type_ids", "attention_mask", "pixel_values"])

        # test if it raises when no input is passed
        with self.assertRaises(ValueError):
            processor()

    def test_tokenizer_decode(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = VisionTextDualEncoderProcessor(tokenizer=tokenizer, image_processor=image_processor)

        predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]

        decoded_processor = processor.batch_decode(predicted_ids)
        decoded_tok = tokenizer.batch_decode(predicted_ids)

        self.assertListEqual(decoded_tok, decoded_processor)

    def test_model_input_names(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = VisionTextDualEncoderProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"
        image_input = self.prepare_image_inputs()

        inputs = processor(text=input_str, images=image_input)

        self.assertListEqual(list(inputs.keys()), processor.model_input_names)
| 0 | 0 |
def is_ip_v4_address_valid(ip_v4_address: str) -> bool:
    """
    Return True if the given string is a well-formed dotted-quad IPv4 address.
    """
    octets = [int(i) for i in ip_v4_address.split(".") if i.isdigit()]
    return len(octets) == 4 and all(0 <= octet <= 255 for octet in octets)


if __name__ == "__main__":
    ip = input().strip()
    valid_or_invalid = "valid" if is_ip_v4_address_valid(ip) else "invalid"
    print(f"{ip} is a {valid_or_invalid} IP v4 address.")
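    # A few quick self-checks for the validator (illustrative inputs):
    assert is_ip_v4_address_valid("192.168.0.1")
    assert not is_ip_v4_address_valid("256.1.2.3")  # octet out of range
    assert not is_ip_v4_address_valid("1.2.3")  # too few octets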
| 39 |
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import ViTConfig, ViTForImageClassification, ViTImageProcessor, ViTModel
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def create_rename_keys(config, base_model=False):
    rename_keys = []
    for i in range(config.num_hidden_layers):
        # encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
        rename_keys.append((f"blocks.{i}.norm1.weight", f"vit.encoder.layer.{i}.layernorm_before.weight"))
        rename_keys.append((f"blocks.{i}.norm1.bias", f"vit.encoder.layer.{i}.layernorm_before.bias"))
        rename_keys.append((f"blocks.{i}.attn.proj.weight", f"vit.encoder.layer.{i}.attention.output.dense.weight"))
        rename_keys.append((f"blocks.{i}.attn.proj.bias", f"vit.encoder.layer.{i}.attention.output.dense.bias"))
        rename_keys.append((f"blocks.{i}.norm2.weight", f"vit.encoder.layer.{i}.layernorm_after.weight"))
        rename_keys.append((f"blocks.{i}.norm2.bias", f"vit.encoder.layer.{i}.layernorm_after.bias"))
        rename_keys.append((f"blocks.{i}.mlp.fc1.weight", f"vit.encoder.layer.{i}.intermediate.dense.weight"))
        rename_keys.append((f"blocks.{i}.mlp.fc1.bias", f"vit.encoder.layer.{i}.intermediate.dense.bias"))
        rename_keys.append((f"blocks.{i}.mlp.fc2.weight", f"vit.encoder.layer.{i}.output.dense.weight"))
        rename_keys.append((f"blocks.{i}.mlp.fc2.bias", f"vit.encoder.layer.{i}.output.dense.bias"))

    # projection layer + position embeddings
    rename_keys.extend(
        [
            ("cls_token", "vit.embeddings.cls_token"),
            ("patch_embed.proj.weight", "vit.embeddings.patch_embeddings.projection.weight"),
            ("patch_embed.proj.bias", "vit.embeddings.patch_embeddings.projection.bias"),
            ("pos_embed", "vit.embeddings.position_embeddings"),
        ]
    )

    if base_model:
        # layernorm + pooler
        rename_keys.extend(
            [
                ("norm.weight", "layernorm.weight"),
                ("norm.bias", "layernorm.bias"),
            ]
        )

        # if just the base model, we should remove "vit" from all keys that start with "vit"
        rename_keys = [(pair[0], pair[1][4:]) if pair[1].startswith("vit") else pair for pair in rename_keys]
    else:
        # layernorm + classification head
        rename_keys.extend(
            [
                ("norm.weight", "vit.layernorm.weight"),
                ("norm.bias", "vit.layernorm.bias"),
                ("head.weight", "classifier.weight"),
                ("head.bias", "classifier.bias"),
            ]
        )

    return rename_keys
def read_in_q_k_v(state_dict, config, base_model=False):
    for i in range(config.num_hidden_layers):
        if base_model:
            prefix = ""
        else:
            prefix = "vit."
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"blocks.{i}.attn.qkv.weight")
        in_proj_bias = state_dict.pop(f"blocks.{i}.attn.qkv.bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[
            : config.hidden_size, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.bias"] = in_proj_bias[: config.hidden_size]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.bias"] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.bias"] = in_proj_bias[-config.hidden_size :]
def remove_classification_head_(state_dict):
    ignore_keys = ["head.weight", "head.bias"]
    for k in ignore_keys:
        state_dict.pop(k, None)
def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val
def prepare_img():
    # We will verify our results on an image of cute cats
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_vit_checkpoint(model_name, pytorch_dump_folder_path, base_model=True):
    config = ViTConfig()
    # patch_size
    if model_name[-1] == "8":
        config.patch_size = 8
    # set labels if required
    if not base_model:
        config.num_labels = 1000
        repo_id = "huggingface/label-files"
        filename = "imagenet-1k-id2label.json"
        id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
        id2label = {int(k): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}
    # size of the architecture
    if model_name in ["dino_vits8", "dino_vits16"]:
        config.hidden_size = 384
        config.intermediate_size = 1536
        config.num_hidden_layers = 12
        config.num_attention_heads = 6
    # load original model from torch hub
    original_model = torch.hub.load("facebookresearch/dino:main", model_name)
    original_model.eval()
    # load state_dict of original model, remove and rename some keys
    state_dict = original_model.state_dict()
    if base_model:
        remove_classification_head_(state_dict)
    rename_keys = create_rename_keys(config, base_model=base_model)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_q_k_v(state_dict, config, base_model)
    # load HuggingFace model
    if base_model:
        model = ViTModel(config, add_pooling_layer=False).eval()
    else:
        model = ViTForImageClassification(config).eval()
    model.load_state_dict(state_dict)
    # Check outputs on an image, prepared by ViTImageProcessor
    image_processor = ViTImageProcessor()
    encoding = image_processor(images=prepare_img(), return_tensors="pt")
    pixel_values = encoding["pixel_values"]
    outputs = model(pixel_values)
    if base_model:
        final_hidden_state_cls_token = original_model(pixel_values)
        assert torch.allclose(final_hidden_state_cls_token, outputs.last_hidden_state[:, 0, :], atol=1e-1)
    else:
        logits = original_model(pixel_values)
        assert logits.shape == outputs.logits.shape
        assert torch.allclose(logits, outputs.logits, atol=1e-3)
    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f"Saving model {model_name} to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--model_name''',
default='''dino_vitb16''',
type=str,
help='''Name of the model trained with DINO you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.'''
)
parser.add_argument(
'''--base_model''',
action='''store_true''',
help='''Whether to only convert the base model (no projection head weights).''',
)
parser.set_defaults(base_model=True)
    args = parser.parse_args()
convert_vit_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.base_model)
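    # Example invocation of this script (file and output paths are illustrative):
    #   python convert_dino_to_pytorch.py --model_name dino_vitb16 \
    #       --pytorch_dump_folder_path ./dino_vitb16 --base_model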
| 39 | 1 |
def reverse_long_words(sentence: str) -> str:
    """
    Reverse all words that are longer than 4 characters in a sentence.

    >>> reverse_long_words("Hey wollef sroirraw")
    'Hey fellow warriors'
    """
    return " ".join(word[::-1] if len(word) > 4 else word for word in sentence.split())


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    print(reverse_long_words("Hey wollef sroirraw"))
| 371 |
import numpy as np
import skfuzzy as fuzz
if __name__ == "__main__":
# Create universe of discourse in Python using linspace ()
    X = np.linspace(start=0, stop=75, num=75, endpoint=True, retstep=False)
    # Create two fuzzy sets by defining any membership function
    # (trapmf(), gbellmf(), gaussmf(), etc).
    abc1 = [0, 25, 50]
    abc2 = [25, 50, 75]
    young = fuzz.membership.trimf(X, abc1)
    middle_aged = fuzz.membership.trimf(X, abc2)
    # Compute the different operations using inbuilt functions.
    one = np.ones(75)
    zero = np.zeros((75,))
    # 1. Union = max(µA(x), µB(x))
    union = fuzz.fuzzy_or(X, young, X, middle_aged)[1]
    # 2. Intersection = min(µA(x), µB(x))
    intersection = fuzz.fuzzy_and(X, young, X, middle_aged)[1]
    # 3. Complement (A) = (1 - µA(x))
    complement_a = fuzz.fuzzy_not(young)
    # 4. Difference (A/B) = min(µA(x), (1 - µB(x)))
    difference = fuzz.fuzzy_and(X, young, X, fuzz.fuzzy_not(middle_aged)[1])[1]
    # 5. Algebraic Sum = [µA(x) + µB(x) - (µA(x) * µB(x))]
    alg_sum = young + middle_aged - (young * middle_aged)
    # 6. Algebraic Product = (µA(x) * µB(x))
    alg_product = young * middle_aged
    # 7. Bounded Sum = min[1, (µA(x) + µB(x))]
    bdd_sum = fuzz.fuzzy_and(X, one, X, young + middle_aged)[1]
    # 8. Bounded difference = max[0, (µA(x) - µB(x))]
    bdd_difference = fuzz.fuzzy_or(X, zero, X, young - middle_aged)[1]
# max-product composition
# Plot each set A, set B and each operation result using plot() and subplot().
from matplotlib import pyplot as plt
plt.figure()
plt.subplot(4, 3, 1)
plt.plot(X, young)
plt.title('Young')
plt.grid(True)
plt.subplot(4, 3, 2)
plt.plot(X, middle_aged)
plt.title('Middle aged')
plt.grid(True)
plt.subplot(4, 3, 3)
plt.plot(X, union)
plt.title('union')
plt.grid(True)
plt.subplot(4, 3, 4)
plt.plot(X, intersection)
plt.title('intersection')
plt.grid(True)
plt.subplot(4, 3, 5)
plt.plot(X, complement_a)
plt.title('complement_a')
plt.grid(True)
plt.subplot(4, 3, 6)
plt.plot(X, difference)
plt.title('difference a/b')
plt.grid(True)
plt.subplot(4, 3, 7)
plt.plot(X, alg_sum)
plt.title('alg_sum')
plt.grid(True)
plt.subplot(4, 3, 8)
plt.plot(X, alg_product)
plt.title('alg_product')
plt.grid(True)
plt.subplot(4, 3, 9)
plt.plot(X, bdd_sum)
plt.title('bdd_sum')
plt.grid(True)
plt.subplot(4, 3, 10)
plt.plot(X, bdd_difference)
plt.title('bdd_difference')
plt.grid(True)
plt.subplots_adjust(hspace=0.5)
plt.show()
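    # Sanity check (assumption: on a shared universe X, skfuzzy's fuzzy_or and
    # fuzzy_and reduce to elementwise max and min of the membership values):
    assert np.allclose(union, np.maximum(young, middle_aged))
    assert np.allclose(intersection, np.minimum(young, middle_aged))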
| 265 | 0 |
"""simple docstring"""
from sklearn.metrics import recall_score
import datasets
_DESCRIPTION = '''
Recall is the fraction of the positive examples that were correctly labeled by the model as positive. It can be computed with the equation:
Recall = TP / (TP + FN)
Where TP is the true positives and FN is the false negatives.
'''
_KWARGS_DESCRIPTION = '''
Args:
- **predictions** (`list` of `int`): The predicted labels.
- **references** (`list` of `int`): The ground truth labels.
- **labels** (`list` of `int`): The set of labels to include when `average` is not set to `binary`, and their order when average is `None`. Labels present in the data can be excluded in this input, for example to calculate a multiclass average ignoring a majority negative class, while labels not present in the data will result in 0 components in a macro average. For multilabel targets, labels are column indices. By default, all labels in y_true and y_pred are used in sorted order. Defaults to None.
- **pos_label** (`int`): The class label to use as the \'positive class\' when calculating the recall. Defaults to `1`.
- **average** (`string`): This parameter is required for multiclass/multilabel targets. If None, the scores for each class are returned. Otherwise, this determines the type of averaging performed on the data. Defaults to `\'binary\'`.
- `\'binary\'`: Only report results for the class specified by `pos_label`. This is applicable only if the target labels and predictions are binary.
- `\'micro\'`: Calculate metrics globally by counting the total true positives, false negatives, and false positives.
- `\'macro\'`: Calculate metrics for each label, and find their unweighted mean. This does not take label imbalance into account.
- `\'weighted\'`: Calculate metrics for each label, and find their average weighted by support (the number of true instances for each label). This alters `\'macro\'` to account for label imbalance. Note that it can result in an F-score that is not between precision and recall.
- `\'samples\'`: Calculate metrics for each instance, and find their average (only meaningful for multilabel classification).
- **sample_weight** (`list` of `float`): Sample weights Defaults to `None`.
- **zero_division** (`"warn"`, `0` or `1`): Sets the value to return when there is a zero division. Defaults to `"warn"`.
- `\'warn\'`: If there is a zero division, the return value is `0`, but warnings are also raised.
- `0`: If there is a zero division, the return value is `0`.
- `1`: If there is a zero division, the return value is `1`.
Returns:
- **recall** (`float`, or `array` of `float`): Either the general recall score, or the recall scores for individual classes, depending on the values input to `labels` and `average`. Minimum possible value is 0. Maximum possible value is 1. A higher recall means that more of the positive examples have been labeled correctly. Therefore, a higher recall is generally considered better.
Examples:
Example 1-A simple example with some errors
>>> recall_metric = datasets.load_metric(\'recall\')
>>> results = recall_metric.compute(references=[0, 0, 1, 1, 1], predictions=[0, 1, 0, 1, 1])
>>> print(results)
{\'recall\': 0.6666666666666666}
Example 2-The same example as Example 1, but with `pos_label=0` instead of the default `pos_label=1`.
>>> recall_metric = datasets.load_metric(\'recall\')
>>> results = recall_metric.compute(references=[0, 0, 1, 1, 1], predictions=[0, 1, 0, 1, 1], pos_label=0)
>>> print(results)
{\'recall\': 0.5}
Example 3-The same example as Example 1, but with `sample_weight` included.
>>> recall_metric = datasets.load_metric(\'recall\')
>>> sample_weight = [0.9, 0.2, 0.9, 0.3, 0.8]
>>> results = recall_metric.compute(references=[0, 0, 1, 1, 1], predictions=[0, 1, 0, 1, 1], sample_weight=sample_weight)
>>> print(results)
{\'recall\': 0.55}
Example 4-A multiclass example, using different averages.
>>> recall_metric = datasets.load_metric(\'recall\')
>>> predictions = [0, 2, 1, 0, 0, 1]
>>> references = [0, 1, 2, 0, 1, 2]
>>> results = recall_metric.compute(predictions=predictions, references=references, average=\'macro\')
>>> print(results)
{\'recall\': 0.3333333333333333}
>>> results = recall_metric.compute(predictions=predictions, references=references, average=\'micro\')
>>> print(results)
{\'recall\': 0.3333333333333333}
>>> results = recall_metric.compute(predictions=predictions, references=references, average=\'weighted\')
>>> print(results)
{\'recall\': 0.3333333333333333}
>>> results = recall_metric.compute(predictions=predictions, references=references, average=None)
>>> print(results)
{\'recall\': array([1., 0., 0.])}
'''
_CITATION = '''
@article{scikit-learn, title={Scikit-learn: Machine Learning in {P}ython}, author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V. and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P. and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.}, journal={Journal of Machine Learning Research}, volume={12}, pages={2825--2830}, year={2011}
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class Recall(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION, citation=_CITATION, inputs_description=_KWARGS_DESCRIPTION, features=datasets.Features(
                {
                    '''predictions''': datasets.Sequence(datasets.Value('''int32''' ) ),
                    '''references''': datasets.Sequence(datasets.Value('''int32''' ) ),
                }
                if self.config_name == '''multilabel'''
                else {
                    '''predictions''': datasets.Value('''int32''' ),
                    '''references''': datasets.Value('''int32''' ),
                } ) , reference_urls=['''https://scikit-learn.org/stable/modules/generated/sklearn.metrics.recall_score.html'''] , )

    def _compute(self, predictions, references, labels=None, pos_label=1, average="binary", sample_weight=None, zero_division="warn"):
        score = recall_score(
            references, predictions, labels=labels, pos_label=pos_label, average=average, sample_weight=sample_weight, zero_division=zero_division, )
        return {"recall": float(score) if score.size == 1 else score}
| 60 |
"""simple docstring"""
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
"""configuration_autoformer""": [
"""AUTOFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""AutoformerConfig""",
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_autoformer"] = [
"""AUTOFORMER_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""AutoformerForPrediction""",
"""AutoformerModel""",
"""AutoformerPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_autoformer import (
AUTOFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
AutoformerConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_autoformer import (
AUTOFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
AutoformerForPrediction,
AutoformerModel,
AutoformerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
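    # Sketch of what the lazy registration above buys us (illustrative only;
    # the heavy torch-backed module is materialized on first attribute access,
    # not at package import time):
    #   from transformers.models.autoformer import AutoformerConfig  # cheap
    #   AutoformerConfig()  # triggers the deferred submodule import on demand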
| 194 | 0 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

VIT_MSN_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "sayakpaul/vit-msn-base": "https://huggingface.co/sayakpaul/vit-msn-base/resolve/main/config.json",
    # See all ViT MSN models at https://huggingface.co/models?filter=vit_msn
}


class ViTMSNConfig(PretrainedConfig):
    model_type = "vit_msn"

    def __init__(self, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_act="gelu", hidden_dropout_prob=0.0, attention_probs_dropout_prob=0.0, initializer_range=0.02, layer_norm_eps=1e-06, image_size=224, patch_size=16, num_channels=3, qkv_bias=True, **kwargs):
        super().__init__(**kwargs)
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias
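

# Quick usage sketch (the companion model classes are assumed to follow the
# usual transformers naming, e.g. ViTMSNModel):
#
#   config = ViTMSNConfig(image_size=384)  # any field can be overridden
#   config.save_pretrained("./vit-msn-config")  # hypothetical output path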
| 351 |
import qiskit


def single_qubit_measure(qubits: int, classical_bits: int) -> qiskit.result.counts.Counts:
    """
    Measure a single qubit and return the histogram of observed states.
    """
    simulator = qiskit.Aer.get_backend("aer_simulator")
    # Create a Quantum Circuit acting on the q register
    circuit = qiskit.QuantumCircuit(qubits, classical_bits)
    # Map the quantum measurement to the classical bits
    circuit.measure([0], [0])
    # Execute the circuit on the simulator
    job = qiskit.execute(circuit, simulator, shots=1000)
    # Return the histogram data of the results of the experiment.
    return job.result().get_counts(circuit)


if __name__ == "__main__":
    print(f"Total count for various states are: {single_qubit_measure(1, 1)}")
| 262 | 0 |
"""simple docstring"""
import re
import time
from typing import Optional
import IPython.display as disp
from ..trainer_callback import TrainerCallback
from ..trainer_utils import IntervalStrategy, has_length
def format_time(t):
    "Format `t` (in seconds) to (h):mm:ss"
    t = int(t)
    h, m, s = t // 3600, (t // 60) % 60, t % 60
    return f"{h}:{m:02d}:{s:02d}" if h != 0 else f"{m:02d}:{s:02d}"


def html_progress_bar(value, total, prefix, label, width=300):
    # docstyle-ignore
    return f"""
    <div>
      {prefix}
      <progress value='{value}' max='{total}' style='width:{width}px; height:20px; vertical-align: middle;'></progress>
      {label}
    </div>
    """


def text_to_html_table(items):
    "Put the texts in `items` in an HTML table."
    html_code = """<table border="1" class="dataframe">\n"""
    html_code += """  <thead>\n <tr style="text-align: left;">\n"""
    for i in items[0]:
        html_code += f"    <th>{i}</th>\n"
    html_code += "    </tr>\n  </thead>\n  <tbody>\n"
    for line in items[1:]:
        html_code += "    <tr>\n"
        for elt in line:
            elt = f"{elt:.6f}" if isinstance(elt, float) else str(elt)
            html_code += f"      <td>{elt}</td>\n"
        html_code += "    </tr>\n"
    html_code += "  </tbody>\n</table><p>"
    return html_code
class NotebookProgressBar:
    warmup = 5
    update_every = 0.2

    def __init__(self, total, prefix=None, leave=True, parent=None, width=300):
        self.total = total
        self.prefix = "" if prefix is None else prefix
        self.leave = leave
        self.parent = parent
        self.width = width
        self.last_value = None
        self.comment = None
        self.output = None

    def update(self, value, force_update=False, comment=None):
        self.value = value
        if comment is not None:
            self.comment = comment
        if self.last_value is None:
            self.start_time = self.last_time = time.time()
            self.start_value = self.last_value = value
            self.elapsed_time = self.predicted_remaining = None
            self.first_calls = self.warmup
            self.wait_for = 1
            self.update_bar(value)
        elif value <= self.last_value and not force_update:
            return
        elif force_update or self.first_calls > 0 or value >= min(self.last_value + self.wait_for, self.total):
            if self.first_calls > 0:
                self.first_calls -= 1
            current_time = time.time()
            self.elapsed_time = current_time - self.start_time
            # We could have value = self.start_value if the update is called twice with the same start value.
            if value > self.start_value:
                self.average_time_per_item = self.elapsed_time / (value - self.start_value)
            else:
                self.average_time_per_item = None
            if value >= self.total:
                value = self.total
                self.predicted_remaining = None
                if not self.leave:
                    self.close()
            elif self.average_time_per_item is not None:
                self.predicted_remaining = self.average_time_per_item * (self.total - value)
            self.update_bar(value)
            self.last_value = value
            self.last_time = current_time
            if self.average_time_per_item is None:
                self.wait_for = 1
            else:
                self.wait_for = max(int(self.update_every / self.average_time_per_item), 1)

    def update_bar(self, value, comment=None):
        spaced_value = " " * (len(str(self.total)) - len(str(value))) + str(value)
        if self.elapsed_time is None:
            self.label = f"[{spaced_value}/{self.total} : < :"
        elif self.predicted_remaining is None:
            self.label = f"[{spaced_value}/{self.total} {format_time(self.elapsed_time)}"
        else:
            self.label = (
                f"[{spaced_value}/{self.total} {format_time(self.elapsed_time)} <"
                f" {format_time(self.predicted_remaining)}"
            )
            self.label += f", {1/self.average_time_per_item:.2f} it/s"
        self.label += "]" if self.comment is None or len(self.comment) == 0 else f", {self.comment}]"
        self.display()

    def display(self):
        self.html_code = html_progress_bar(self.value, self.total, self.prefix, self.label, self.width)
        if self.parent is not None:
            # If this is a child bar, the parent will take care of the display.
            self.parent.display()
            return
        if self.output is None:
            self.output = disp.display(disp.HTML(self.html_code), display_id=True)
        else:
            self.output.update(disp.HTML(self.html_code))

    def close(self):
        "Closes the progress bar."
        if self.parent is None and self.output is not None:
            self.output.update(disp.HTML(""))
class NotebookTrainingTracker(NotebookProgressBar):
    def __init__(self, num_steps, column_names=None):
        super().__init__(num_steps)
        self.inner_table = None if column_names is None else [column_names]
        self.child_bar = None

    def display(self):
        self.html_code = html_progress_bar(self.value, self.total, self.prefix, self.label, self.width)
        if self.inner_table is not None:
            self.html_code += text_to_html_table(self.inner_table)
        if self.child_bar is not None:
            self.html_code += self.child_bar.html_code
        if self.output is None:
            self.output = disp.display(disp.HTML(self.html_code), display_id=True)
        else:
            self.output.update(disp.HTML(self.html_code))

    def write_line(self, values):
        if self.inner_table is None:
            self.inner_table = [list(values.keys()), list(values.values())]
        else:
            columns = self.inner_table[0]
            if len(self.inner_table) == 1:
                # We give a chance to update the column names at the first iteration
                for key in values.keys():
                    if key not in columns:
                        columns.append(key)
                self.inner_table[0] = columns
            self.inner_table.append([values[c] for c in columns])

    def add_child(self, total, prefix=None, width=300):
        self.child_bar = NotebookProgressBar(total, prefix=prefix, parent=self, width=width)
        return self.child_bar

    def remove_child(self):
        self.child_bar = None
        self.display()
class NotebookProgressCallback(TrainerCallback):
    def __init__(self):
        self.training_tracker = None
        self.prediction_bar = None
        self._force_next_update = False

    def on_train_begin(self, args, state, control, **kwargs):
        self.first_column = "Epoch" if args.evaluation_strategy == IntervalStrategy.EPOCH else "Step"
        self.training_loss = 0
        self.last_log = 0
        column_names = [self.first_column] + ["Training Loss"]
        if args.evaluation_strategy != IntervalStrategy.NO:
            column_names.append("Validation Loss")
        self.training_tracker = NotebookTrainingTracker(state.max_steps, column_names)

    def on_step_end(self, args, state, control, **kwargs):
        epoch = int(state.epoch) if int(state.epoch) == state.epoch else f"{state.epoch:.2f}"
        self.training_tracker.update(
            state.global_step + 1,
            comment=f"Epoch {epoch}/{state.num_train_epochs}",
            force_update=self._force_next_update,
        )
        self._force_next_update = False

    def on_prediction_step(self, args, state, control, eval_dataloader=None, **kwargs):
        if not has_length(eval_dataloader):
            return
        if self.prediction_bar is None:
            if self.training_tracker is not None:
                self.prediction_bar = self.training_tracker.add_child(len(eval_dataloader))
            else:
                self.prediction_bar = NotebookProgressBar(len(eval_dataloader))
            self.prediction_bar.update(1)
        else:
            self.prediction_bar.update(self.prediction_bar.value + 1)

    def on_predict(self, args, state, control, **kwargs):
        if self.prediction_bar is not None:
            self.prediction_bar.close()
        self.prediction_bar = None

    def on_log(self, args, state, control, logs=None, **kwargs):
        # Only for when there is no evaluation
        if args.evaluation_strategy == IntervalStrategy.NO and "loss" in logs:
            values = {"Training Loss": logs["loss"]}
            # First column is necessarily Step since we're not in epoch eval strategy
            values["Step"] = state.global_step
            self.training_tracker.write_line(values)

    def on_evaluate(self, args, state, control, metrics=None, **kwargs):
        if self.training_tracker is not None:
            values = {"Training Loss": "No log", "Validation Loss": "No log"}
            for log in reversed(state.log_history):
                if "loss" in log:
                    values["Training Loss"] = log["loss"]
                    break
            if self.first_column == "Epoch":
                values["Epoch"] = int(state.epoch)
            else:
                values["Step"] = state.global_step
            metric_key_prefix = "eval"
            for k in metrics:
                if k.endswith("_loss"):
                    metric_key_prefix = re.sub(r"\_loss$", "", k)
            _ = metrics.pop("total_flos", None)
            _ = metrics.pop("epoch", None)
            _ = metrics.pop(f"{metric_key_prefix}_runtime", None)
            _ = metrics.pop(f"{metric_key_prefix}_samples_per_second", None)
            _ = metrics.pop(f"{metric_key_prefix}_steps_per_second", None)
            _ = metrics.pop(f"{metric_key_prefix}_jit_compilation_time", None)
            for k, v in metrics.items():
                if k == f"{metric_key_prefix}_loss":
                    values["Validation Loss"] = v
                else:
                    splits = k.split("_")
                    name = " ".join([part.capitalize() for part in splits[1:]])
                    values[name] = v
            self.training_tracker.write_line(values)
            self.training_tracker.remove_child()
            self.prediction_bar = None
            # Evaluation takes a long time so we should force the next update.
            self._force_next_update = True

    def on_train_end(self, args, state, control, **kwargs):
        self.training_tracker.update(
            state.global_step,
            comment=f"Epoch {int(state.epoch)}/{state.num_train_epochs}",
            force_update=True,
        )
        self.training_tracker = None
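

# Standalone usage sketch for the bar above (values are illustrative; inside
# a notebook the HTML output updates in place rather than printing new lines):
#
#   pbar = NotebookProgressBar(100)
#   for step in range(100):
#       pbar.update(step + 1)
#       time.sleep(0.05)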
| 108 |
import doctest
from collections import deque

import numpy as np


class CircularConvolution:
    """
    Perform the circular convolution of two signals using the matrix method.
    """

    def __init__(self):
        self.first_signal = [2, 1, 2, -1]
        self.second_signal = [1, 2, 3, 4]

    def circular_convolution(self) -> list[float]:
        length_first_signal = len(self.first_signal)
        length_second_signal = len(self.second_signal)

        max_length = max(length_first_signal, length_second_signal)

        # create a zero matrix of max_length x max_length
        matrix = [[0] * max_length for i in range(max_length)]

        # fills the smaller signal with zeros to make both signals of same length
        if length_first_signal < length_second_signal:
            self.first_signal += [0] * (max_length - length_first_signal)
        elif length_first_signal > length_second_signal:
            self.second_signal += [0] * (max_length - length_second_signal)

        # build the circulant matrix by rotating the second signal row by row
        for i in range(max_length):
            rotated_signal = deque(self.second_signal)
            rotated_signal.rotate(i)
            for j, item in enumerate(rotated_signal):
                matrix[i][j] += item

        # multiply the matrix with the first signal
        final_signal = np.matmul(np.transpose(matrix), np.transpose(self.first_signal))

        # rounding-off to two decimal places
        return [round(i, 2) for i in final_signal]


if __name__ == "__main__":
    doctest.testmod()
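    # Worked example for the default signals above: circularly convolving
    # [2, 1, 2, -1] with [1, 2, 3, 4] gives [10.0, 10.0, 6.0, 14.0]
    # (e.g. y[0] = 2*1 + 1*4 + 2*3 + (-1)*2 = 10).
    print(CircularConvolution().circular_convolution())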
| 149 | 0 |
from __future__ import annotations
Path = list[tuple[int, int]]

grid = [
    [0, 0, 0, 0, 0, 0, 0],
    [0, 1, 0, 0, 0, 0, 0],  # 0 are free path whereas 1's are obstacles
    [0, 0, 0, 0, 0, 0, 0],
    [0, 0, 1, 0, 0, 0, 0],
    [1, 0, 1, 0, 0, 0, 0],
    [0, 0, 0, 0, 0, 0, 0],
    [0, 0, 0, 0, 1, 0, 0],
]

delta = ([-1, 0], [0, -1], [1, 0], [0, 1])  # up, left, down, right
class Node:
    def __init__(self, pos_x, pos_y, goal_x, goal_y, g_cost, parent):
        self.pos_x = pos_x
        self.pos_y = pos_y
        self.pos = (pos_y, pos_x)
        self.goal_x = goal_x
        self.goal_y = goal_y
        self.g_cost = g_cost
        self.parent = parent
        self.f_cost = self.calculate_heuristic()

    def calculate_heuristic(self) -> float:
        # Manhattan distance to the goal
        dx = abs(self.pos_x - self.goal_x)
        dy = abs(self.pos_y - self.goal_y)
        return dx + dy

    def __lt__(self, other) -> bool:
        return self.f_cost < other.f_cost


class GreedyBestFirst:
    def __init__(self, start: tuple[int, int], goal: tuple[int, int]):
        self.start = Node(start[1], start[0], goal[1], goal[0], 0, None)
        self.target = Node(goal[1], goal[0], goal[1], goal[0], 99999, None)

        self.open_nodes = [self.start]
        self.closed_nodes = []

        self.reached = False

    def search(self) -> Path | None:
        while self.open_nodes:
            # Open Nodes are sorted using __lt__
            self.open_nodes.sort()
            current_node = self.open_nodes.pop(0)

            if current_node.pos == self.target.pos:
                self.reached = True
                return self.retrace_path(current_node)

            self.closed_nodes.append(current_node)
            successors = self.get_successors(current_node)

            for child_node in successors:
                if child_node in self.closed_nodes:
                    continue

                if child_node not in self.open_nodes:
                    self.open_nodes.append(child_node)
                else:
                    # retrieve the best current path
                    better_node = self.open_nodes.pop(self.open_nodes.index(child_node))

                    if child_node.g_cost < better_node.g_cost:
                        self.open_nodes.append(child_node)
                    else:
                        self.open_nodes.append(better_node)

        if not self.reached:
            return [self.start.pos]
        return None

    def get_successors(self, parent: Node) -> list[Node]:
        successors = []
        for action in delta:
            pos_x = parent.pos_x + action[1]
            pos_y = parent.pos_y + action[0]

            if not (0 <= pos_x <= len(grid[0]) - 1 and 0 <= pos_y <= len(grid) - 1):
                continue

            if grid[pos_y][pos_x] != 0:
                continue

            successors.append(
                Node(
                    pos_x,
                    pos_y,
                    self.target.pos_y,
                    self.target.pos_x,
                    parent.g_cost + 1,
                    parent,
                )
            )
        return successors

    def retrace_path(self, node: Node | None) -> Path:
        current_node = node
        path = []
        while current_node is not None:
            path.append((current_node.pos_y, current_node.pos_x))
            current_node = current_node.parent
        path.reverse()
        return path
if __name__ == "__main__":
    init = (0, 0)
    goal = (len(grid) - 1, len(grid[0]) - 1)
    for elem in grid:
        print(elem)

    print("------")

    greedy_bf = GreedyBestFirst(init, goal)
    path = greedy_bf.search()
    if path:
        for pos_x, pos_y in path:
            grid[pos_x][pos_y] = 2

        for elem in grid:
            print(elem)
| 49 |
import unittest
import numpy as np
from transformers import is_flax_available
from transformers.testing_utils import require_flax
from ..test_modeling_flax_common import ids_tensor
if is_flax_available():
import jax
import jax.numpy as jnp
from transformers.generation import (
FlaxForcedBOSTokenLogitsProcessor,
FlaxForcedEOSTokenLogitsProcessor,
FlaxLogitsProcessorList,
FlaxMinLengthLogitsProcessor,
FlaxTemperatureLogitsWarper,
FlaxTopKLogitsWarper,
FlaxTopPLogitsWarper,
)
@require_flax
class LogitsProcessorTest(unittest.TestCase):
    def _get_uniform_logits(self, batch_size, length):
        scores = jnp.ones((batch_size, length)) / length
        return scores

    def test_temperature_dist_warper(self):
        input_ids = None
        length = 20

        scores = self._get_uniform_logits(batch_size=2, length=length)

        # tweak scores to not be uniform anymore
        scores = scores.at[1, 5].set((1 / length) + 0.1)  # peak, 1st batch
        scores = scores.at[1, 10].set((1 / length) - 0.4)  # valley, 1st batch

        # compute softmax
        probs = jax.nn.softmax(scores, axis=-1)

        temp_dist_warper_sharper = FlaxTemperatureLogitsWarper(temperature=0.5)
        temp_dist_warper_smoother = FlaxTemperatureLogitsWarper(temperature=1.3)

        warped_prob_sharp = jax.nn.softmax(temp_dist_warper_sharper(input_ids, scores.copy(), cur_len=None), axis=-1)
        warped_prob_smooth = jax.nn.softmax(temp_dist_warper_smoother(input_ids, scores.copy(), cur_len=None), axis=-1)

        # uniform distribution stays uniform
        self.assertTrue(jnp.allclose(probs[0, :], warped_prob_sharp[0, :], atol=1e-3))
        self.assertTrue(jnp.allclose(probs[0, :], warped_prob_smooth[0, :], atol=1e-3))

        # sharp peaks get higher, valleys get lower
        self.assertLess(probs[1, :].max(), warped_prob_sharp[1, :].max())
        self.assertGreater(probs[1, :].min(), warped_prob_sharp[1, :].min())

        # smooth peaks get lower, valleys get higher
        self.assertGreater(probs[1, :].max(), warped_prob_smooth[1, :].max())
        self.assertLess(probs[1, :].min(), warped_prob_smooth[1, :].min())

    def test_top_k_dist_warper(self):
        input_ids = None
        vocab_size = 10
        batch_size = 2

        # create ramp distribution
        ramp_logits = np.broadcast_to(np.arange(vocab_size)[None, :], (batch_size, vocab_size)).copy()
        ramp_logits[1:, : vocab_size // 2] = ramp_logits[1:, : vocab_size // 2] + vocab_size

        top_k_warp = FlaxTopKLogitsWarper(3)

        scores = top_k_warp(input_ids, ramp_logits, cur_len=None)

        # check that correct tokens are filtered
        self.assertListEqual(jnp.isinf(scores[0]).tolist(), 7 * [True] + 3 * [False])
        self.assertListEqual(jnp.isinf(scores[1]).tolist(), 2 * [True] + 3 * [False] + 5 * [True])

        # check special case
        length = 5
        top_k_warp_safety_check = FlaxTopKLogitsWarper(top_k=1, filter_value=0.0, min_tokens_to_keep=3)

        ramp_logits = np.broadcast_to(np.arange(length)[None, :], (batch_size, length)).copy()
        scores = top_k_warp_safety_check(input_ids, ramp_logits, cur_len=None)

        # min_tokens overwrites k: 3 tokens are kept => 2 tokens are nullified
        self.assertListEqual((scores == 0.0).sum(axis=-1).tolist(), [2, 2])

    def test_top_p_dist_warper(self):
        input_ids = None
        vocab_size = 10
        batch_size = 2

        # create distribution and take log (inverse to Softmax as taken in TopPLogitsWarper)
        dist = np.log(np.array([[0.3, 0.1, 0.1, 0.5], [0.15, 0.3, 0.3, 0.25]]))

        top_p_warp = FlaxTopPLogitsWarper(0.8)
        filtered_dist = np.exp(top_p_warp(input_ids, dist, cur_len=None))

        # dist should be filtered to keep min num values so that sum is >= top_p
        # exp (-inf) => 0
        EXPECTED_FILTERED_DIST = np.array([[0.3, 0.0, 0.0, 0.5], [0.0, 0.3, 0.3, 0.25]])
        self.assertTrue(np.allclose(filtered_dist, EXPECTED_FILTERED_DIST, atol=1e-3))

        # check edge cases with negative and extreme logits
        ramp_logits = np.broadcast_to(np.arange(vocab_size)[None, :], (batch_size, vocab_size)).copy() - (
            vocab_size // 2
        )

        # make ramp_logits more extreme
        ramp_logits[1] = ramp_logits[1] * 100.0

        # make sure at least 2 tokens are kept
        top_p_warp = FlaxTopPLogitsWarper(0.9, min_tokens_to_keep=2, filter_value=0.0)
        filtered_dist = top_p_warp(input_ids, ramp_logits, cur_len=None)

        # first batch should keep three tokens, second batch would keep only 1, but due to `min_tokens_to_keep=2` keeps 2.
        self.assertListEqual((filtered_dist != 0.0).sum(axis=-1).tolist(), [3, 2])

    def test_min_length_dist_processor(self):
        vocab_size = 20
        batch_size = 4
        eos_token_id = 0

        min_dist_processor = FlaxMinLengthLogitsProcessor(min_length=10, eos_token_id=eos_token_id)

        # check that min length is applied at length 5
        input_ids = ids_tensor((batch_size, 20), vocab_size=20)
        cur_len = 5
        scores = self._get_uniform_logits(batch_size, vocab_size)
        scores_before_min_length = min_dist_processor(input_ids, scores, cur_len=cur_len)
        self.assertListEqual(scores_before_min_length[:, eos_token_id].tolist(), 4 * [-float("inf")])

        # check that min length is not applied anymore at length 15
        scores = self._get_uniform_logits(batch_size, vocab_size)
        cur_len = 15
        scores_before_min_length = min_dist_processor(input_ids, scores, cur_len=cur_len)
        self.assertFalse(jnp.isinf(scores_before_min_length).any())

    def test_forced_bos_token_logits_processor(self):
        vocab_size = 20
        batch_size = 4
        bos_token_id = 0

        logits_processor = FlaxForcedBOSTokenLogitsProcessor(bos_token_id=bos_token_id)

        # check that all scores are -inf except the bos_token_id score
        input_ids = ids_tensor((batch_size, 1), vocab_size=20)
        cur_len = 1
        scores = self._get_uniform_logits(batch_size, vocab_size)
        scores = logits_processor(input_ids, scores, cur_len=cur_len)
        self.assertTrue(jnp.isneginf(scores[:, bos_token_id + 1 :]).all())
        self.assertListEqual(scores[:, bos_token_id].tolist(), 4 * [0])  # score for bos_token_id should be zero

        # check that bos_token_id is not forced if current length is greater than 1
        cur_len = 3
        scores = self._get_uniform_logits(batch_size, vocab_size)
        scores = logits_processor(input_ids, scores, cur_len=cur_len)
        self.assertFalse(jnp.isinf(scores).any())

    def test_forced_eos_token_logits_processor(self):
        vocab_size = 20
        batch_size = 4
        eos_token_id = 0
        max_length = 5

        logits_processor = FlaxForcedEOSTokenLogitsProcessor(max_length=max_length, eos_token_id=eos_token_id)

        # check that all scores are -inf except the eos_token_id when max_length is reached
        input_ids = ids_tensor((batch_size, 4), vocab_size=20)
        cur_len = 4
        scores = self._get_uniform_logits(batch_size, vocab_size)
        scores = logits_processor(input_ids, scores, cur_len=cur_len)
        self.assertTrue(jnp.isneginf(scores[:, eos_token_id + 1 :]).all())
        self.assertListEqual(scores[:, eos_token_id].tolist(), 4 * [0])  # score for eos_token_id should be zero

        # check that eos_token_id is not forced if max_length is not reached
        cur_len = 3
        scores = self._get_uniform_logits(batch_size, vocab_size)
        scores = logits_processor(input_ids, scores, cur_len=cur_len)
        self.assertFalse(jnp.isinf(scores).any())

    def test_processor_list(self):
        batch_size = 4
        sequence_length = 10
        vocab_size = 15
        eos_token_id = 2
        bos_token_id = 1
        max_length = 15

        # dummy input_ids and scores
        input_ids = ids_tensor((batch_size, sequence_length), vocab_size)
        input_ids_comp = input_ids.copy()

        scores = self._get_uniform_logits(batch_size, vocab_size)
        scores_comp = scores.copy()

        # instantiate all dist processors
        temp_dist_warp = FlaxTemperatureLogitsWarper(temperature=0.5)
        top_k_warp = FlaxTopKLogitsWarper(3)
        top_p_warp = FlaxTopPLogitsWarper(0.8)

        # instantiate all logits processors
        min_dist_proc = FlaxMinLengthLogitsProcessor(min_length=10, eos_token_id=eos_token_id)
        bos_dist_proc = FlaxForcedBOSTokenLogitsProcessor(bos_token_id=bos_token_id)
        eos_dist_proc = FlaxForcedEOSTokenLogitsProcessor(max_length=max_length, eos_token_id=eos_token_id)

        cur_len = 10

        # no processor list
        scores = temp_dist_warp(input_ids, scores, cur_len=cur_len)
        scores = top_k_warp(input_ids, scores, cur_len=cur_len)
        scores = top_p_warp(input_ids, scores, cur_len=cur_len)
        scores = min_dist_proc(input_ids, scores, cur_len=cur_len)
        scores = bos_dist_proc(input_ids, scores, cur_len=cur_len)
        scores = eos_dist_proc(input_ids, scores, cur_len=cur_len)

        # with processor list
        processor = FlaxLogitsProcessorList(
            [temp_dist_warp, top_k_warp, top_p_warp, min_dist_proc, bos_dist_proc, eos_dist_proc]
        )
        scores_comp = processor(input_ids, scores_comp, cur_len=cur_len)

        # scores should be equal
        self.assertTrue(jnp.allclose(scores, scores_comp, atol=1e-3))

        # input_ids should never be changed
        self.assertListEqual(input_ids.tolist(), input_ids_comp.tolist())

    def test_processor_list_jitted(self):
        batch_size = 4
        sequence_length = 10
        vocab_size = 15
        eos_token_id = 2
        bos_token_id = 1
        max_length = 15

        # dummy input_ids and scores
        input_ids = ids_tensor((batch_size, sequence_length), vocab_size)
        input_ids_comp = input_ids.copy()

        scores = self._get_uniform_logits(batch_size, vocab_size)
        scores_comp = scores.copy()

        # instantiate all dist processors
        temp_dist_warp = FlaxTemperatureLogitsWarper(temperature=0.5)
        top_k_warp = FlaxTopKLogitsWarper(3)
        top_p_warp = FlaxTopPLogitsWarper(0.8)

        # instantiate all logits processors
        min_dist_proc = FlaxMinLengthLogitsProcessor(min_length=10, eos_token_id=eos_token_id)
        bos_dist_proc = FlaxForcedBOSTokenLogitsProcessor(bos_token_id=bos_token_id)
        eos_dist_proc = FlaxForcedEOSTokenLogitsProcessor(max_length=max_length, eos_token_id=eos_token_id)

        cur_len = 10

        # no processor list
        def run_no_processor_list(input_ids, scores, cur_len):
            scores = temp_dist_warp(input_ids, scores, cur_len=cur_len)
            scores = top_k_warp(input_ids, scores, cur_len=cur_len)
            scores = top_p_warp(input_ids, scores, cur_len=cur_len)
            scores = min_dist_proc(input_ids, scores, cur_len=cur_len)
            scores = bos_dist_proc(input_ids, scores, cur_len=cur_len)
            scores = eos_dist_proc(input_ids, scores, cur_len=cur_len)
            return scores

        # with processor list
        def run_processor_list(input_ids, scores, cur_len):
            processor = FlaxLogitsProcessorList(
                [temp_dist_warp, top_k_warp, top_p_warp, min_dist_proc, bos_dist_proc, eos_dist_proc]
            )
            scores = processor(input_ids, scores, cur_len=cur_len)
            return scores

        jitted_run_no_processor_list = jax.jit(run_no_processor_list)
        jitted_run_processor_list = jax.jit(run_processor_list)

        scores = jitted_run_no_processor_list(input_ids, scores, cur_len)
        scores_comp = jitted_run_processor_list(input_ids, scores_comp, cur_len)

        # scores should be equal
        self.assertTrue(jnp.allclose(scores, scores_comp, atol=1e-3))

        # input_ids should never be changed
        self.assertListEqual(input_ids.tolist(), input_ids_comp.tolist())
| 49 | 1 |
import torch

from diffusers import DiffusionPipeline


class CustomPipeline(DiffusionPipeline):
    def __init__(self, unet, scheduler):
        super().__init__()
        self.register_modules(unet=unet, scheduler=scheduler)

    def __call__(self):
        image = torch.randn(
            (1, self.unet.config.in_channels, self.unet.config.sample_size, self.unet.config.sample_size),
        )
        timestep = 1

        model_output = self.unet(image, timestep).sample
        scheduler_output = self.scheduler.step(model_output, timestep, image).prev_sample

        # a deterministic all-ones result, just to show the pipeline ran end to end
        result = scheduler_output - scheduler_output + torch.ones_like(scheduler_output)

        return result
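

# Usage sketch (a tiny UNet2DModel and DDPMScheduler are assumed purely for
# illustration; any compatible unet/scheduler pair would do):
#
#   from diffusers import UNet2DModel, DDPMScheduler
#   pipe = CustomPipeline(unet=UNet2DModel(sample_size=8), scheduler=DDPMScheduler())
#   out = pipe()  # tensor of ones with the sample's shape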
| 270 |
import math
import os
import re
import sys
import unittest
from pathlib import Path
from typing import Tuple
from unittest.mock import patch
from parameterized import parameterized
from transformers.testing_utils import (
CaptureStderr,
ExtendSysPath,
TestCasePlus,
execute_subprocess_async,
get_gpu_count,
get_torch_dist_unique_port,
require_apex,
require_bitsandbytes,
require_fairscale,
require_torch,
require_torch_gpu,
require_torch_multi_gpu,
require_torch_non_multi_gpu,
slow,
)
from transformers.trainer_callback import TrainerState
from transformers.trainer_utils import set_seed
bindir = os.path.abspath(os.path.dirname(__file__))
with ExtendSysPath(F'{bindir}/../../examples/pytorch/translation'):
from run_translation import main # noqa
set_seed(42)
MARIAN_MODEL = "sshleifer/student_marian_en_ro_6_1"
MBART_TINY = "sshleifer/tiny-mbart"
@require_torch
class TestTrainerExt(TestCasePlus):
    def run_seqaseq_quick(self, distributed=False, extra_args_str=None, predict_with_generate=True, do_train=True, do_eval=True, do_predict=True):
        output_dir = self.run_trainer(
            eval_steps=1, max_len=12, model_name=MARIAN_MODEL, num_train_epochs=1, distributed=distributed, extra_args_str=extra_args_str, predict_with_generate=predict_with_generate, do_train=do_train, do_eval=do_eval, do_predict=do_predict, )
        logs = TrainerState.load_from_json(os.path.join(output_dir, "trainer_state.json")).log_history

        if not do_eval:
            return

        eval_metrics = [log for log in logs if "eval_loss" in log.keys()]

        first_step_stats = eval_metrics[0]
        if predict_with_generate:
            assert "eval_bleu" in first_step_stats

            last_step_stats = eval_metrics[-1]
            assert isinstance(last_step_stats["eval_bleu"], float)
            assert not math.isnan(float(last_step_stats["eval_loss"])), "eval_loss must not be `nan`"
@require_torch_non_multi_gpu
def __A ( self : Optional[int] ) -> int:
self.run_seqaseq_quick()
@require_torch_multi_gpu
def __A ( self : int ) -> List[str]:
self.run_seqaseq_quick(distributed=SCREAMING_SNAKE_CASE__ )
@require_torch_multi_gpu
def __A ( self : Optional[Any] ) -> Tuple:
self.run_seqaseq_quick(distributed=SCREAMING_SNAKE_CASE__ )
@unittest.skip('''Requires an update of the env running those tests''' )
@require_torch_multi_gpu
@require_fairscale
def __A ( self : Dict ) -> Tuple:
self.run_seqaseq_quick(distributed=SCREAMING_SNAKE_CASE__ , extra_args_str='''--sharded_ddp simple''' )
@unittest.skip('''Requires an update of the env running those tests''' )
@require_torch_multi_gpu
@require_fairscale
def __A ( self : Optional[int] ) -> List[str]:
self.run_seqaseq_quick(distributed=SCREAMING_SNAKE_CASE__ , extra_args_str='''--sharded_ddp simple --fp16''' )
@unittest.skip('''Requires an update of the env running those tests''' )
@require_torch_multi_gpu
@require_fairscale
def __A ( self : Tuple ) -> Any:
self.run_seqaseq_quick(distributed=SCREAMING_SNAKE_CASE__ , extra_args_str='''--sharded_ddp zero_dp_2''' , predict_with_generate=SCREAMING_SNAKE_CASE__ )
@unittest.skip('''Requires an update of the env running those tests''' )
@require_torch_multi_gpu
@require_fairscale
def __A ( self : Dict ) -> Tuple:
self.run_seqaseq_quick(
distributed=SCREAMING_SNAKE_CASE__ , extra_args_str='''--sharded_ddp zero_dp_2 --fp16''' , predict_with_generate=SCREAMING_SNAKE_CASE__ )
@require_apex
@require_torch_gpu
def __A ( self : Union[str, Any] ) -> List[str]:
        # XXX: apex breaks the trainer if it's run twice, e.g. run_seq2seq.main() from the same
        # program, and it breaks other tests that run from the same pytest worker; therefore, until this is
        # sorted out, it must be run only in an external program - that is, distributed=True in this
        # test and only under one or more gpus. If we want cpu we will need to make a special test.
#
        # specifically, the problem was traced to self.optimizer.step() - if it's run a 2nd time via a
        # 2nd main() call it botches the future eval.
#
self.run_seqaseq_quick(distributed=SCREAMING_SNAKE_CASE__ , extra_args_str='''--fp16 --fp16_backend=apex''' )
        # test a 2nd time - was getting "eval_loss: nan"
# to reproduce the problem set distributed=False
self.run_seqaseq_quick(distributed=SCREAMING_SNAKE_CASE__ , extra_args_str='''--fp16 --fp16_backend=apex''' )
@parameterized.expand(['''base''', '''low''', '''high''', '''mixed'''] )
@require_torch_multi_gpu
    def test_trainer_log_level_replica( self , experiment_id ):
# as each sub-test is slow-ish split into multiple sub-tests to avoid CI timeout
        experiments = {
# test with the default log_level - should be info and thus log info once
'''base''': {'''extra_args_str''': '''''', '''n_matches''': 1},
# test with low log_level and log_level_replica - should be noisy on all processes
# now the info string should appear twice on 2 processes
'''low''': {'''extra_args_str''': '''--log_level debug --log_level_replica debug''', '''n_matches''': 2},
# test with high log_level and low log_level_replica
# now the info string should appear once only on the replica
'''high''': {'''extra_args_str''': '''--log_level error --log_level_replica debug''', '''n_matches''': 1},
# test with high log_level and log_level_replica - should be quiet on all processes
'''mixed''': {'''extra_args_str''': '''--log_level error --log_level_replica error''', '''n_matches''': 0},
}
        data = experiments[experiment_id]
        kwargs = {'''distributed''': True, '''predict_with_generate''': False, '''do_eval''': False, '''do_predict''': False}
        log_info_string = '''Running training'''
        with CaptureStderr() as cl:
            self.run_seqaseq_quick(**kwargs , extra_args_str=data['''extra_args_str'''] )
        n_matches = len(re.findall(log_info_string , cl.err ) )
        self.assertEqual(n_matches , data['''n_matches'''] )
@slow
def __A ( self : Any ) -> Optional[Any]:
        output_dir = self.run_trainer(
            eval_steps=2 , max_len=1_28 , model_name=MARIAN_MODEL , learning_rate=3e-4 , num_train_epochs=10 , distributed=False , )
        # Check metrics
        logs = TrainerState.load_from_json(os.path.join(output_dir , '''trainer_state.json''' ) ).log_history
        eval_metrics = [log for log in logs if '''eval_loss''' in log.keys()]
        first_step_stats = eval_metrics[0]
        last_step_stats = eval_metrics[-1]
assert first_step_stats["eval_loss"] > last_step_stats["eval_loss"], "model learned nothing"
        assert isinstance(last_step_stats['''eval_bleu'''] , float )
# test if do_predict saves generations and metrics
        contents = os.listdir(output_dir )
        contents = {os.path.basename(p ) for p in contents}
assert "generated_predictions.txt" in contents
assert "predict_results.json" in contents
@slow
@require_bitsandbytes
def __A ( self : Optional[int] ) -> str:
from transformers.training_args import OptimizerNames
        def train_and_return_metrics(optim : str ) -> Tuple[int, float]:
            extra_args_str = '''--skip_memory_metrics 0'''
            output_dir = self.run_trainer(
                max_len=1_28 , model_name=MARIAN_MODEL , learning_rate=3e-4 , num_train_epochs=1 , optim=optim , distributed=True , extra_args_str=extra_args_str , do_eval=False , do_predict=False , n_gpus_to_use=1 , )
            # Check metrics
            logs = TrainerState.load_from_json(Path(output_dir , '''trainer_state.json''' ) ).log_history
            gpu_peak_mem_mb = int(logs[0]['''train_mem_gpu_peaked_delta'''] / 2**20 )
            gpu_alloc_mem_mb = int(logs[0]['''train_mem_gpu_alloc_delta'''] / 2**20 )
            loss = logs[0]['''train_loss''']
            return gpu_peak_mem_mb, gpu_alloc_mem_mb, loss
        gpu_peak_mem_orig , gpu_alloc_mem_orig , loss_orig = train_and_return_metrics(OptimizerNames.ADAMW_TORCH.value )
        gpu_peak_mem_bnb , gpu_alloc_mem_bnb , loss_bnb = train_and_return_metrics(OptimizerNames.ADAMW_BNB.value )
        gpu_alloc_mem_diff = gpu_alloc_mem_orig - gpu_alloc_mem_bnb
        gpu_total_mem_orig = gpu_peak_mem_orig + gpu_alloc_mem_orig
        gpu_total_mem_bnb = gpu_peak_mem_bnb + gpu_alloc_mem_bnb
        gpu_total_mem_diff = gpu_total_mem_orig - gpu_total_mem_bnb
        # sshleifer/student_marian_en_ro_6_1 has 54M parameters, 29M of which is `nn.Embedding`, which
        # doesn't get quantized and remains in fp32. Therefore we only have 25M parameters quantized
# in 2 bytes and the diff in optim memory usage is derived as so:
#
# - normal 25*8=~200MB (8 bytes per param)
# - bnb 25*2= ~50MB (2 bytes per param)
#
# Thus we should expect ~150MB total memory saved.
#
# Peak memory should be the same - the total should be different by about that same margin
#
# After leaving a small margin to accommodate for differences between gpus let's check
# that we have at least 120MB in savings
        expected_savings = 1_20
# uncomment the following if this test starts failing - requires py38 for a new print feature
# gpu_peak_mem_diff = gpu_peak_mem_orig - gpu_peak_mem_bnb
# print(f"{gpu_alloc_mem_orig=}MB {gpu_peak_mem_orig=}MB {gpu_alloc_mem_orig+gpu_peak_mem_orig=}MB")
# print(f" {gpu_alloc_mem_bnb=}MB {gpu_peak_mem_bnb=}MB {gpu_alloc_mem_bnb+gpu_peak_mem_bnb=}MB")
# print(f"{gpu_alloc_mem_diff=}MB")
# print(f"{gpu_peak_mem_diff=}MB")
# print(f"{gpu_total_mem_orig=}MB, {gpu_total_mem_bnb=}MB")
# print(f"{gpu_total_mem_diff=}MB, {gpu_total_mem_diff=}MB")
        self.assertGreater(
            gpu_alloc_mem_diff , expected_savings , '''should use ~150MB less alloc gpu memory with BNB, compared to without it for this model, but got'''
            f''' a difference of {gpu_alloc_mem_diff}MB, with gpu_alloc_mem_orig={gpu_alloc_mem_orig}MB and'''
            f''' gpu_alloc_mem_bnb={gpu_alloc_mem_bnb}MB''' , )
        self.assertGreater(
            gpu_total_mem_diff , expected_savings , '''should use ~150MB less total gpu memory with BNB, compared to without it for this model, but got'''
            f''' a difference of {gpu_total_mem_diff}MB, with gpu_total_mem_orig={gpu_total_mem_orig}MB and'''
            f''' gpu_total_mem_bnb={gpu_total_mem_bnb}MB''' , )
        self.assertEqual(
            loss_orig , loss_bnb , f'''loss should be the same, but got loss_orig={loss_orig}, loss_bnb={loss_bnb}''' )
    def run_trainer( self , max_len : int , model_name : str , num_train_epochs : int , learning_rate : float = 3e-3 , optim : str = "adafactor" , distributed : bool = False , extra_args_str : str = None , eval_steps : int = 0 , predict_with_generate : bool = True , do_train : bool = True , do_eval : bool = True , do_predict : bool = True , n_gpus_to_use : int = None , ):
        data_dir = self.test_file_dir / '''../fixtures/tests_samples/wmt_en_ro'''
        output_dir = self.get_auto_remove_tmp_dir()
        args_train = f'''
            --model_name_or_path {model_name}
            --train_file {data_dir}/train.json
            --validation_file {data_dir}/val.json
            --test_file {data_dir}/test.json
            --output_dir {output_dir}
            --overwrite_output_dir
            --max_train_samples 8
            --max_source_length {max_len}
            --max_target_length {max_len}
            --do_train
            --num_train_epochs {str(num_train_epochs )}
            --per_device_train_batch_size 4
            --learning_rate {learning_rate}
            --warmup_steps 8
            --logging_steps 0
            --logging_strategy no
            --save_steps {str(eval_steps )}
            --group_by_length
            --label_smoothing_factor 0.1
            --target_lang ro_RO
            --source_lang en_XX
        '''.split()
        args_eval = f'''
            --do_eval
            --per_device_eval_batch_size 4
            --max_eval_samples 8
            --val_max_target_length {max_len}
            --evaluation_strategy steps
            --eval_steps {str(eval_steps )}
        '''.split()
        args_predict = '''
            --do_predict
        '''.split()
        args = []
        if do_train:
            args += args_train
        if do_eval:
            args += args_eval
        if do_predict:
            args += args_predict
        if predict_with_generate:
            args += "--predict_with_generate".split()
        if do_train:
            if optim == "adafactor":
                args += "--adafactor".split()
            else:
                args += f'''--optim {optim}'''.split()
        if extra_args_str is not None:
            args += extra_args_str.split()
        if distributed:
            if n_gpus_to_use is None:
                n_gpus_to_use = get_gpu_count()
            master_port = get_torch_dist_unique_port()
            distributed_args = f'''
                -m torch.distributed.run
                --nproc_per_node={n_gpus_to_use}
                --master_port={master_port}
                {self.examples_dir_str}/pytorch/translation/run_translation.py
            '''.split()
            cmd = [sys.executable] + distributed_args + args
            # keep for quick debug
            # print(" ".join([f"\nPYTHONPATH={self.src_dir_str}"] +cmd)); die
            execute_subprocess_async(cmd , env=self.get_env() )
        else:
            testargs = ['''run_translation.py'''] + args
            with patch.object(sys , '''argv''' , testargs ):
main()
return output_dir
| 270 | 1 |
"""simple docstring"""
import darl # noqa
import gym
import tqdm
from diffusers.experimental import ValueGuidedRLPipeline
config = {
"n_samples": 64,
"horizon": 32,
"num_inference_steps": 20,
"n_guide_steps": 2, # can set to 0 for faster sampling, does not use value network
"scale_grad_by_std": True,
"scale": 0.1,
"eta": 0.0,
"t_grad_cutoff": 2,
"device": "cpu",
}
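# `horizon` is the planning window the diffusion model denoises over; `n_guide_steps` is the number
# of value-gradient guidance iterations applied per denoising step (an assumption about the
# pipeline's guided-sampling scheme; 0 disables value guidance, as noted above)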
if __name__ == "__main__":
__A = "hopper-medium-v2"
__A = gym.make(env_name)
__A = ValueGuidedRLPipeline.from_pretrained(
"bglick13/hopper-medium-v2-value-function-hor32",
env=env,
)
env.seed(0)
__A = env.reset()
__A = 0
__A = 0
__A = 1000
__A = [obs.copy()]
try:
for t in tqdm.tqdm(range(T)):
# call the policy
__A = pipeline(obs, planning_horizon=32)
# execute action in environment
__A , __A , __A , __A = env.step(denorm_actions)
__A = env.get_normalized_score(total_reward)
# update return
total_reward += reward
total_score += score
print(
F'''Step: {t}, Reward: {reward}, Total Reward: {total_reward}, Score: {score}, Total Score:'''
F''' {total_score}'''
)
# save observations for rendering
rollout.append(next_observation.copy())
__A = next_observation
except KeyboardInterrupt:
pass
print(F'''Total reward: {total_reward}''')
| 108 |
"""simple docstring"""
# Lint as: python3
import os
import re
import urllib.parse
from pathlib import Path
from typing import Callable, List, Optional, Union
from zipfile import ZipFile
from ..utils.file_utils import cached_path, hf_github_url
from ..utils.logging import get_logger
from ..utils.version import Version
__A = get_logger(__name__)
class snake_case :
SCREAMING_SNAKE_CASE_ : List[Any] = """dummy_data"""
SCREAMING_SNAKE_CASE_ : List[Any] = """datasets"""
SCREAMING_SNAKE_CASE_ : Any = False
    def __init__( self , dataset_name : str , config : str , version : Union[Version, str] , cache_dir : Optional[str] = None , use_local_dummy_data : bool = False , load_existing_dummy_data : bool = True , download_callbacks : Optional[List[Callable]] = None , )-> Optional[Any]:
        '''simple docstring'''
        self.downloaded_size = 0
        self.dataset_name = dataset_name
        self.cache_dir = cache_dir
        self.use_local_dummy_data = use_local_dummy_data
        self.config = config
        # download_callbacks take a single url as input
        self.download_callbacks: List[Callable] = download_callbacks or []
        # if False, it doesn't load existing files and it returns the paths of the dummy files relative
        # to the dummy_data zip file root
        self.load_existing_dummy_data = load_existing_dummy_data
        # TODO(PVP, QL) might need to make this more general
        self.version_name = str(version)
        # to be downloaded
        self._dummy_file = None
        self._bucket_url = None
@property
    def dummy_file( self )-> str:
        '''simple docstring'''
        if self._dummy_file is None:
            self._dummy_file = self.download_dummy_data()
return self._dummy_file
@property
    def dummy_data_folder( self )-> Optional[Any]:
'''simple docstring'''
if self.config is not None:
# structure is dummy / config_name / version_name
return os.path.join("dummy" , self.config.name , self.version_name)
# structure is dummy / version_name
return os.path.join("dummy" , self.version_name)
@property
    def dummy_zip_file( self )-> Any:
'''simple docstring'''
return os.path.join(self.dummy_data_folder , "dummy_data.zip")
    def download_dummy_data( self )-> List[str]:
        '''simple docstring'''
        path_to_dummy_data_dir = (
            self.local_path_to_dummy_data if self.use_local_dummy_data is True else self.github_path_to_dummy_data
        )
        local_path = cached_path(
            path_to_dummy_data_dir , cache_dir=self.cache_dir , extract_compressed_file=True , force_extract=True)
        return os.path.join(local_path , self.dummy_file_name)
@property
    def local_path_to_dummy_data( self )-> List[Any]:
'''simple docstring'''
return os.path.join(self.datasets_scripts_dir , self.dataset_name , self.dummy_zip_file)
@property
    def github_path_to_dummy_data( self )-> Tuple:
        '''simple docstring'''
        if self._bucket_url is None:
            self._bucket_url = hf_github_url(self.dataset_name , self.dummy_zip_file.replace(os.sep , "/"))
return self._bucket_url
@property
def lowercase_ ( self : str)-> Optional[int]:
'''simple docstring'''
if os.path.isdir(self.dummy_file):
return self.dummy_file
# else cut off path to file -> example `xsum`.
return "/".join(self.dummy_file.replace(os.sep , "/").split("/")[:-1])
    def download_and_extract( self , data_url , *args )-> Optional[int]:
        '''simple docstring'''
        if self.load_existing_dummy_data:
            # dummy data is downloaded and tested
            path_to_dummy_data = self.dummy_file
        else:
            # dummy data cannot be downloaded and only the path to dummy file is returned
            path_to_dummy_data = self.dummy_file_name
        # special case when data_url is a dict
        if isinstance(data_url , dict):
            return self.create_dummy_data_dict(path_to_dummy_data , data_url)
        elif isinstance(data_url , (list, tuple)):
            return self.create_dummy_data_list(path_to_dummy_data , data_url)
        else:
            return self.create_dummy_data_single(path_to_dummy_data , data_url)
    def download( self , data_url , *args )-> Dict:
        '''simple docstring'''
        return self.download_and_extract(data_url)
    def download_custom( self , data_url , custom_download )-> str:
        '''simple docstring'''
        return self.download_and_extract(data_url)
    def extract( self , path , *args , **kwargs )-> List[str]:
        '''simple docstring'''
        return path
def lowercase_ ( self : Optional[Any])-> Any:
'''simple docstring'''
return {}
    def create_dummy_data_dict( self , path_to_dummy_data , data_url )-> Optional[Any]:
        '''simple docstring'''
        dummy_data_dict = {}
        for key, single_urls in data_url.items():
            for download_callback in self.download_callbacks:
                if isinstance(single_urls , list):
                    for single_url in single_urls:
                        download_callback(single_url)
                else:
                    single_url = single_urls
                    download_callback(single_url)
            # we force the name of each key to be the last file / folder name of the url path
            # if the url has arguments, we need to encode them with urllib.parse.quote_plus
            # e.g. (illustrative) Path("https://host/data/train.txt?dl=1").name == "train.txt?dl=1",
            # which urllib.parse.quote_plus encodes as "train.txt%3Fdl%3D1"
            if isinstance(single_urls , list):
                value = [os.path.join(path_to_dummy_data , urllib.parse.quote_plus(Path(x).name)) for x in single_urls]
            else:
                single_url = single_urls
                value = os.path.join(path_to_dummy_data , urllib.parse.quote_plus(Path(single_url).name))
            dummy_data_dict[key] = value
        # make sure that values are unique
        if all(isinstance(i , str) for i in dummy_data_dict.values()) and len(set(dummy_data_dict.values())) < len(
            dummy_data_dict.values()):
            # append key to value to make its name unique
            dummy_data_dict = {key: value + key for key, value in dummy_data_dict.items()}
        return dummy_data_dict
    def create_dummy_data_list( self , path_to_dummy_data , data_url )-> int:
        '''simple docstring'''
        dummy_data_list = []
        # trick: if there are many shards named like `data.txt-000001-of-00300`, only use the first one
        is_tf_records = all(bool(re.findall("[0-9]{3,}-of-[0-9]{3,}" , url)) for url in data_url)
        is_pubmed_records = all(
            url.startswith("https://ftp.ncbi.nlm.nih.gov/pubmed/baseline/pubmed") for url in data_url)
        if data_url and (is_tf_records or is_pubmed_records):
            data_url = [data_url[0]] * len(data_url)
        for single_url in data_url:
            for download_callback in self.download_callbacks:
                download_callback(single_url)
            # we force the name of each key to be the last file / folder name of the url path
            # if the url has arguments, we need to encode them with urllib.parse.quote_plus
            value = os.path.join(path_to_dummy_data , urllib.parse.quote_plus(single_url.split("/")[-1]))
            dummy_data_list.append(value)
        return dummy_data_list
    def create_dummy_data_single( self , path_to_dummy_data , data_url )-> Optional[int]:
        '''simple docstring'''
        for download_callback in self.download_callbacks:
            download_callback(data_url)
        # we force the name of each key to be the last file / folder name of the url path
        # if the url has arguments, we need to encode them with urllib.parse.quote_plus
        value = os.path.join(path_to_dummy_data , urllib.parse.quote_plus(data_url.split("/")[-1]))
        if os.path.exists(value) or not self.load_existing_dummy_data:
            return value
else:
# Backward compatibility, maybe deprecate at one point.
# For many datasets with single url calls to dl_manager.download_and_extract,
# the dummy_data.zip file is actually the zipped downloaded file
# while now we expected the dummy_data.zip file to be a directory containing
# the downloaded file.
return path_to_dummy_data
def lowercase_ ( self : List[str])-> Dict:
'''simple docstring'''
pass
def lowercase_ ( self : Union[str, Any])-> Tuple:
'''simple docstring'''
pass
    def iter_archive( self , path : str )-> int:
        '''simple docstring'''
        def _iter_archive_members(path : str):
            # this preserves the order of the members inside the ZIP archive
            dummy_parent_path = Path(self.dummy_file).parent
            relative_path = path.relative_to(dummy_parent_path)
            with ZipFile(self.local_path_to_dummy_data) as zip_file:
                members = zip_file.namelist()
            for member in members:
                if member.startswith(relative_path.as_posix()):
                    yield dummy_parent_path.joinpath(member)
        path = Path(path)
        file_paths = _iter_archive_members(path) if self.use_local_dummy_data else path.rglob("*")
        for file_path in file_paths:
            if file_path.is_file() and not file_path.name.startswith((".", "__")):
                yield file_path.relative_to(path).as_posix(), file_path.open("rb")
    def iter_files( self , paths )-> str:
        '''simple docstring'''
        if not isinstance(paths , list):
            paths = [paths]
        for path in paths:
            if os.path.isfile(path):
                if os.path.basename(path).startswith((".", "__")):
                    return
                yield path
            else:
                for dirpath, dirnames, filenames in os.walk(path):
                    if os.path.basename(dirpath).startswith((".", "__")):
                        continue
                    dirnames.sort()
                    for filename in sorted(filenames):
                        if filename.startswith((".", "__")):
                            continue
                        yield os.path.join(dirpath , filename)
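# A minimal usage sketch of the mock manager above (illustrative values; assumes a local
# dummy_data.zip has been prepared for the dataset in question):
#
#   dl_manager = snake_case("squad", config=None, version="1.0.0", use_local_dummy_data=True)
#   files = dl_manager.download_and_extract({"train": "https://host/train.json"})
#   # -> {"train": "<extracted dummy_data dir>/train.json"}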
| 108 | 1 |
import argparse
import os
import gluonnlp as nlp
import mxnet as mx
import numpy as np
import torch
from gluonnlp.base import get_home_dir
from gluonnlp.model.bert import BERTEncoder
from gluonnlp.model.utils import _load_vocab
from gluonnlp.vocab import Vocab
from packaging import version
from torch import nn
from transformers import BertConfig, BertForMaskedLM, BertModel, RobertaTokenizer
from transformers.models.bert.modeling_bert import (
BertIntermediate,
BertLayer,
BertOutput,
BertSelfAttention,
BertSelfOutput,
)
from transformers.utils import logging
if version.parse(nlp.__version__) != version.parse('0.8.3'):
raise Exception('requires gluonnlp == 0.8.3')
if version.parse(mx.__version__) != version.parse('1.5.0'):
raise Exception('requires mxnet == 1.5.0')
logging.set_verbosity_info()
__lowerCAmelCase : Tuple = logging.get_logger(__name__)
SAMPLE_TEXT = 'The Nymphenburg Palace is a beautiful palace in Munich!'
def convert_bort_checkpoint_to_pytorch( A_, pytorch_dump_folder_path ):
'''simple docstring'''
    bort_4_8_768_1024_hparams = {
"""attention_cell""": """multi_head""",
"""num_layers""": 4,
"""units""": 1024,
"""hidden_size""": 768,
"""max_length""": 512,
"""num_heads""": 8,
"""scaled""": True,
"""dropout""": 0.1,
"""use_residual""": True,
"""embed_size""": 1024,
"""embed_dropout""": 0.1,
"""word_embed""": None,
"""layer_norm_eps""": 1e-5,
"""token_type_vocab_size""": 2,
}
    predefined_args = bort_4_8_768_1024_hparams
# Let's construct the original Bort model here
# Taken from official BERT implementation, see:
# https://github.com/alexa/bort/blob/master/bort/bort.py
__magic_name__ = BERTEncoder(
attention_cell=predefined_args["""attention_cell"""], num_layers=predefined_args["""num_layers"""], units=predefined_args["""units"""], hidden_size=predefined_args["""hidden_size"""], max_length=predefined_args["""max_length"""], num_heads=predefined_args["""num_heads"""], scaled=predefined_args["""scaled"""], dropout=predefined_args["""dropout"""], output_attention=A_, output_all_encodings=A_, use_residual=predefined_args["""use_residual"""], activation=predefined_args.get("""activation""", """gelu""" ), layer_norm_eps=predefined_args.get("""layer_norm_eps""", A_ ), )
# Vocab information needs to be fetched first
# It's the same as RoBERTa, so RobertaTokenizer can be used later
__magic_name__ = """openwebtext_ccnews_stories_books_cased"""
# Specify download folder to Gluonnlp's vocab
__magic_name__ = os.path.join(get_home_dir(), """models""" )
__magic_name__ = _load_vocab(A_, A_, A_, cls=A_ )
    original_bort = nlp.model.BERTModel(
A_, len(A_ ), units=predefined_args["""units"""], embed_size=predefined_args["""embed_size"""], embed_dropout=predefined_args["""embed_dropout"""], word_embed=predefined_args["""word_embed"""], use_pooler=A_, use_token_type_embed=A_, token_type_vocab_size=predefined_args["""token_type_vocab_size"""], use_classifier=A_, use_decoder=A_, )
original_bort.load_parameters(A_, cast_dtype=A_, ignore_extra=A_ )
    params = original_bort._collect_params_with_prefix()
# Build our config 🤗
    hf_bort_config_json = {
"""architectures""": ["""BertForMaskedLM"""],
"""attention_probs_dropout_prob""": predefined_args["""dropout"""],
"""hidden_act""": """gelu""",
"""hidden_dropout_prob""": predefined_args["""dropout"""],
"""hidden_size""": predefined_args["""embed_size"""],
"""initializer_range""": 0.02,
"""intermediate_size""": predefined_args["""hidden_size"""],
"""layer_norm_eps""": predefined_args["""layer_norm_eps"""],
"""max_position_embeddings""": predefined_args["""max_length"""],
"""model_type""": """bort""",
"""num_attention_heads""": predefined_args["""num_heads"""],
"""num_hidden_layers""": predefined_args["""num_layers"""],
"""pad_token_id""": 1, # 2 = BERT, 1 = RoBERTa
"""type_vocab_size""": 1, # 2 = BERT, 1 = RoBERTa
"""vocab_size""": len(A_ ),
}
    hf_bort_config = BertConfig.from_dict(hf_bort_config_json )
    hf_bort_model = BertForMaskedLM(hf_bort_config )
hf_bort_model.eval()
# Parameter mapping table (Gluonnlp to Transformers)
# * denotes layer index
#
# | Gluon Parameter | Transformers Parameter
# | -------------------------------------------------------------- | ----------------------
# | `encoder.layer_norm.beta` | `bert.embeddings.LayerNorm.bias`
# | `encoder.layer_norm.gamma` | `bert.embeddings.LayerNorm.weight`
# | `encoder.position_weight` | `bert.embeddings.position_embeddings.weight`
# | `word_embed.0.weight` | `bert.embeddings.word_embeddings.weight`
# | `encoder.transformer_cells.*.attention_cell.proj_key.bias` | `bert.encoder.layer.*.attention.self.key.bias`
# | `encoder.transformer_cells.*.attention_cell.proj_key.weight` | `bert.encoder.layer.*.attention.self.key.weight`
# | `encoder.transformer_cells.*.attention_cell.proj_query.bias` | `bert.encoder.layer.*.attention.self.query.bias`
# | `encoder.transformer_cells.*.attention_cell.proj_query.weight` | `bert.encoder.layer.*.attention.self.query.weight`
# | `encoder.transformer_cells.*.attention_cell.proj_value.bias` | `bert.encoder.layer.*.attention.self.value.bias`
# | `encoder.transformer_cells.*.attention_cell.proj_value.weight` | `bert.encoder.layer.*.attention.self.value.weight`
# | `encoder.transformer_cells.*.ffn.ffn_2.bias` | `bert.encoder.layer.*.attention.output.dense.bias`
# | `encoder.transformer_cells.*.ffn.ffn_2.weight` | `bert.encoder.layer.*.attention.output.dense.weight`
# | `encoder.transformer_cells.*.layer_norm.beta` | `bert.encoder.layer.*.attention.output.LayerNorm.bias`
# | `encoder.transformer_cells.*.layer_norm.gamma` | `bert.encoder.layer.*.attention.output.LayerNorm.weight`
# | `encoder.transformer_cells.*.ffn.ffn_1.bias` | `bert.encoder.layer.*.intermediate.dense.bias`
# | `encoder.transformer_cells.*.ffn.ffn_1.weight` | `bert.encoder.layer.*.intermediate.dense.weight`
# | `encoder.transformer_cells.*.ffn.layer_norm.beta` | `bert.encoder.layer.*.output.LayerNorm.bias`
# | `encoder.transformer_cells.*.ffn.layer_norm.gamma` | `bert.encoder.layer.*.output.LayerNorm.weight`
# | `encoder.transformer_cells.*.proj.bias` | `bert.encoder.layer.*.output.dense.bias`
# | `encoder.transformer_cells.*.proj.weight` | `bert.encoder.layer.*.output.dense.weight`
# Helper function to convert MXNET Arrays to PyTorch
    def to_torch(mx_array ) -> nn.Parameter:
        return nn.Parameter(torch.FloatTensor(mx_array.data().asnumpy() ) )
# Check param shapes and map new HF param back
    def check_and_map_params(hf_param, gluon_param ):
        shape_hf = hf_param.shape
        gluon_param = to_torch(params[gluon_param] )
        shape_gluon = gluon_param.shape
        assert (
            shape_hf == shape_gluon
        ), f'''The gluon parameter {gluon_param} has shape {shape_gluon}, but expects shape {shape_hf} for Transformers'''
        return gluon_param
__magic_name__ = check_and_map_params(
hf_bort_model.bert.embeddings.word_embeddings.weight, """word_embed.0.weight""" )
__magic_name__ = check_and_map_params(
hf_bort_model.bert.embeddings.position_embeddings.weight, """encoder.position_weight""" )
__magic_name__ = check_and_map_params(
hf_bort_model.bert.embeddings.LayerNorm.bias, """encoder.layer_norm.beta""" )
__magic_name__ = check_and_map_params(
hf_bort_model.bert.embeddings.LayerNorm.weight, """encoder.layer_norm.gamma""" )
# Inspired by RoBERTa conversion script, we just zero them out (Bort does not use them)
__magic_name__ = torch.zeros_like(
hf_bort_model.bert.embeddings.token_type_embeddings.weight.data )
for i in range(hf_bort_config.num_hidden_layers ):
        layer = hf_bort_model.bert.encoder.layer[i]
# self attention
        self_attn = layer.attention.self
__magic_name__ = check_and_map_params(
self_attn.key.bias.data, f'''encoder.transformer_cells.{i}.attention_cell.proj_key.bias''' )
__magic_name__ = check_and_map_params(
self_attn.key.weight.data, f'''encoder.transformer_cells.{i}.attention_cell.proj_key.weight''' )
__magic_name__ = check_and_map_params(
self_attn.query.bias.data, f'''encoder.transformer_cells.{i}.attention_cell.proj_query.bias''' )
__magic_name__ = check_and_map_params(
self_attn.query.weight.data, f'''encoder.transformer_cells.{i}.attention_cell.proj_query.weight''' )
__magic_name__ = check_and_map_params(
self_attn.value.bias.data, f'''encoder.transformer_cells.{i}.attention_cell.proj_value.bias''' )
__magic_name__ = check_and_map_params(
self_attn.value.weight.data, f'''encoder.transformer_cells.{i}.attention_cell.proj_value.weight''' )
# self attention output
        self_output = layer.attention.output
__magic_name__ = check_and_map_params(
self_output.dense.bias, f'''encoder.transformer_cells.{i}.proj.bias''' )
__magic_name__ = check_and_map_params(
self_output.dense.weight, f'''encoder.transformer_cells.{i}.proj.weight''' )
__magic_name__ = check_and_map_params(
self_output.LayerNorm.bias, f'''encoder.transformer_cells.{i}.layer_norm.beta''' )
__magic_name__ = check_and_map_params(
self_output.LayerNorm.weight, f'''encoder.transformer_cells.{i}.layer_norm.gamma''' )
# intermediate
        intermediate = layer.intermediate
__magic_name__ = check_and_map_params(
intermediate.dense.bias, f'''encoder.transformer_cells.{i}.ffn.ffn_1.bias''' )
__magic_name__ = check_and_map_params(
intermediate.dense.weight, f'''encoder.transformer_cells.{i}.ffn.ffn_1.weight''' )
# output
        bert_output = layer.output
__magic_name__ = check_and_map_params(
bert_output.dense.bias, f'''encoder.transformer_cells.{i}.ffn.ffn_2.bias''' )
__magic_name__ = check_and_map_params(
bert_output.dense.weight, f'''encoder.transformer_cells.{i}.ffn.ffn_2.weight''' )
__magic_name__ = check_and_map_params(
bert_output.LayerNorm.bias, f'''encoder.transformer_cells.{i}.ffn.layer_norm.beta''' )
__magic_name__ = check_and_map_params(
bert_output.LayerNorm.weight, f'''encoder.transformer_cells.{i}.ffn.layer_norm.gamma''' )
# Save space and energy 🎄
hf_bort_model.half()
# Compare output of both models
    tokenizer = RobertaTokenizer.from_pretrained("""roberta-base""" )
    input_ids = tokenizer.encode_plus(SAMPLE_TEXT )["""input_ids"""]
    # Get gluon output
    gluon_input_ids = mx.nd.array([input_ids] )
    output_gluon = original_bort(inputs=gluon_input_ids , token_types=[] )
    # Get Transformer output (save and reload model again)
    hf_bort_model.save_pretrained(pytorch_dump_folder_path )
    hf_bort_model = BertModel.from_pretrained(pytorch_dump_folder_path )
    hf_bort_model.eval()
    hf_input_ids = tokenizer.encode_plus(SAMPLE_TEXT , return_tensors="""pt""" )
    output_hf = hf_bort_model(**hf_input_ids )[0]
    gluon_layer = output_gluon[0].asnumpy()
    hf_layer = output_hf[0].detach().numpy()
    max_absolute_diff = np.max(np.abs(hf_layer - gluon_layer ) ).item()
    success = np.allclose(hf_layer , gluon_layer , atol=1e-3 )
    if success:
        print("""✔️ Both models output the same tensors""" )
    else:
        print("""❌ The models do **NOT** output the same tensors""" )
        print("""Absolute difference is:""", max_absolute_diff )
if __name__ == "__main__":
__lowerCAmelCase : Any = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--bort_checkpoint_path', default=None, type=str, required=True, help='Path the official Bort params file.'
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
__lowerCAmelCase : Optional[int] = parser.parse_args()
convert_bort_checkpoint_to_pytorch(args.bort_checkpoint_path, args.pytorch_dump_folder_path)
| 88 |
# this script reports modified .py files under the desired list of top-level sub-dirs passed as a list of arguments, e.g.:
# python ./utils/get_modified_files.py utils src tests examples
#
# it uses git to find the forking point and which files were modified - i.e. files not under git won't be considered
# since the output of this script is fed into Makefile commands it doesn't print a newline after the results
import re
import subprocess
import sys
fork_point_sha = subprocess.check_output('git merge-base main HEAD'.split()).decode('utf-8')
modified_files = (
    subprocess.check_output(F'''git diff --diff-filter=d --name-only {fork_point_sha}'''.split()).decode('utf-8').split()
)
joined_dirs = '|'.join(sys.argv[1:])
regex = re.compile(RF'''^({joined_dirs}).*?\.py$''')
relevant_modified_files = [x for x in modified_files if regex.match(x)]
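# e.g. running `python ./utils/get_modified_files.py utils src` compiles the pattern r'^(utils|src).*?\.py$'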
print(' '.join(relevant_modified_files), end='')
| 88 | 1 |
"""simple docstring"""
import sys
import turtle
def get_mid(pa ,pb ):
    return (pa[0] + pb[0]) / 2, (pa[1] + pb[1]) / 2
def triangle(vertexa ,vertexb ,vertexc ,depth ,):
    my_pen.up()
    my_pen.goto(vertexa[0] ,vertexa[1] )
    my_pen.down()
    my_pen.goto(vertexb[0] ,vertexb[1] )
    my_pen.goto(vertexc[0] ,vertexc[1] )
    my_pen.goto(vertexa[0] ,vertexa[1] )
    if depth == 0:
        return
    triangle(vertexa ,get_mid(vertexa ,vertexb ) ,get_mid(vertexa ,vertexc ) ,depth - 1 )
    triangle(vertexb ,get_mid(vertexa ,vertexb ) ,get_mid(vertexb ,vertexc ) ,depth - 1 )
    triangle(vertexc ,get_mid(vertexc ,vertexb ) ,get_mid(vertexa ,vertexc ) ,depth - 1 )
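# each level draws the current triangle's outline, then recurses into the three corner
# sub-triangles formed by the edge midpoints - the classic Sierpinski construction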
if __name__ == "__main__":
if len(sys.argv) != 2:
raise ValueError(
'''Correct format for using this script: '''
'''python fractals.py <int:depth_for_fractal>'''
)
    my_pen = turtle.Turtle()
my_pen.ht()
my_pen.speed(5)
my_pen.pencolor('''red''')
    vertices = [(-175, -125), (0, 175), (175, -125)]  # vertices of triangle
triangle(vertices[0], vertices[1], vertices[2], int(sys.argv[1]))
| 253 |
"""simple docstring"""
from math import factorial
DIGIT_FACTORIAL = {str(d): factorial(d) for d in range(10)}
def sum_of_digit_factorial( n ):
    return sum(DIGIT_FACTORIAL[d] for d in str(n ) )
def solution( ):
    # upper bound: an 8-digit number is at least 10**7, but its digit-factorial sum is at most
    # 8 * 9! = 2903040 < 10**7, so no solution can exceed 7 * 9!
    limit = 7 * factorial(9 ) + 1
    return sum(i for i in range(3 , limit ) if sum_of_digit_factorial(i ) == i )
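# only 145 and 40585 equal the sum of the factorials of their digits, so solution() returns 40730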
if __name__ == "__main__":
print(f'{solution() = }')
| 253 | 1 |
from typing import Any, Dict, Optional
import torch
import torch.nn.functional as F
from torch import nn
from ..utils import maybe_allow_in_graph
from .activations import get_activation
from .attention_processor import Attention
from .embeddings import CombinedTimestepLabelEmbeddings
@maybe_allow_in_graph
class BasicTransformerBlock( nn.Module ):
    def __init__( self , dim , num_attention_heads , attention_head_dim , dropout=0.0 , cross_attention_dim = None , activation_fn = "geglu" , num_embeds_ada_norm = None , attention_bias = False , only_cross_attention = False , double_self_attention = False , upcast_attention = False , norm_elementwise_affine = True , norm_type = "layer_norm" , final_dropout = False , ):
        super().__init__()
        self.only_cross_attention = only_cross_attention
        self.use_ada_layer_norm_zero = (num_embeds_ada_norm is not None) and norm_type == 'ada_norm_zero'
        self.use_ada_layer_norm = (num_embeds_ada_norm is not None) and norm_type == 'ada_norm'
        if norm_type in ("ada_norm", "ada_norm_zero") and num_embeds_ada_norm is None:
            raise ValueError(
                f'''`norm_type` is set to {norm_type}, but `num_embeds_ada_norm` is not defined. Please make sure to'''
                f''' define `num_embeds_ada_norm` if setting `norm_type` to {norm_type}.''' )
        # Define 3 blocks. Each block has its own normalization layer.
        # 1. Self-Attn
        if self.use_ada_layer_norm:
            self.norm1 = AdaLayerNorm(dim , num_embeds_ada_norm )
        elif self.use_ada_layer_norm_zero:
            self.norm1 = AdaLayerNormZero(dim , num_embeds_ada_norm )
        else:
            self.norm1 = nn.LayerNorm(dim , elementwise_affine=norm_elementwise_affine )
        self.attn1 = Attention(
            query_dim=dim , heads=num_attention_heads , dim_head=attention_head_dim , dropout=dropout , bias=attention_bias , cross_attention_dim=cross_attention_dim if only_cross_attention else None , upcast_attention=upcast_attention , )
        # 2. Cross-Attn
        if cross_attention_dim is not None or double_self_attention:
            # We currently only use AdaLayerNormZero for self attention where there will only be one attention block.
            # I.e. the number of returned modulation chunks from AdaLayerZero would not make sense if returned during
            # the second cross attention block.
            self.norm2 = (
                AdaLayerNorm(dim , num_embeds_ada_norm )
                if self.use_ada_layer_norm
                else nn.LayerNorm(dim , elementwise_affine=norm_elementwise_affine )
            )
            self.attn2 = Attention(
                query_dim=dim , cross_attention_dim=cross_attention_dim if not double_self_attention else None , heads=num_attention_heads , dim_head=attention_head_dim , dropout=dropout , bias=attention_bias , upcast_attention=upcast_attention , ) # is self-attn if encoder_hidden_states is none
        else:
            self.norm2 = None
            self.attn2 = None
        # 3. Feed-forward
        self.norm3 = nn.LayerNorm(dim , elementwise_affine=norm_elementwise_affine )
        self.ff = FeedForward(dim , dropout=dropout , activation_fn=activation_fn , final_dropout=final_dropout )
        # let chunk size default to None
        self._chunk_size = None
        self._chunk_dim = 0
    def set_chunk_feed_forward( self , chunk_size , dim ):
        self._chunk_size = chunk_size
        self._chunk_dim = dim
    def forward( self , hidden_states , attention_mask = None , encoder_hidden_states = None , encoder_attention_mask = None , timestep = None , cross_attention_kwargs = None , class_labels = None , ):
        if self.use_ada_layer_norm:
            norm_hidden_states = self.norm1(hidden_states , timestep )
        elif self.use_ada_layer_norm_zero:
            norm_hidden_states , gate_msa , shift_mlp , scale_mlp , gate_mlp = self.norm1(
                hidden_states , timestep , class_labels , hidden_dtype=hidden_states.dtype )
        else:
            norm_hidden_states = self.norm1(hidden_states )
        cross_attention_kwargs = cross_attention_kwargs if cross_attention_kwargs is not None else {}
        attn_output = self.attn1(
            norm_hidden_states , encoder_hidden_states=encoder_hidden_states if self.only_cross_attention else None , attention_mask=attention_mask , **cross_attention_kwargs , )
        if self.use_ada_layer_norm_zero:
            attn_output = gate_msa.unsqueeze(1 ) * attn_output
        hidden_states = attn_output + hidden_states
        # 2. Cross-Attention
        if self.attn2 is not None:
            norm_hidden_states = (
                self.norm2(hidden_states , timestep ) if self.use_ada_layer_norm else self.norm2(hidden_states )
            )
            attn_output = self.attn2(
                norm_hidden_states , encoder_hidden_states=encoder_hidden_states , attention_mask=encoder_attention_mask , **cross_attention_kwargs , )
            hidden_states = attn_output + hidden_states
        # 3. Feed-forward
        norm_hidden_states = self.norm3(hidden_states )
        if self.use_ada_layer_norm_zero:
            norm_hidden_states = norm_hidden_states * (1 + scale_mlp[:, None]) + shift_mlp[:, None]
        if self._chunk_size is not None:
            # "feed_forward_chunk_size" can be used to save memory
            # splitting along self._chunk_dim and concatenating the per-chunk outputs matches one full
            # pass (the feed-forward is position-wise), but peak activation memory scales with chunk size
            if norm_hidden_states.shape[self._chunk_dim] % self._chunk_size != 0:
                raise ValueError(
                    f'''`hidden_states` dimension to be chunked: {norm_hidden_states.shape[self._chunk_dim]} has to be divisible by chunk size: {self._chunk_size}. Make sure to set an appropriate `chunk_size` when calling `unet.enable_forward_chunking`.''' )
            num_chunks = norm_hidden_states.shape[self._chunk_dim] // self._chunk_size
            ff_output = torch.cat(
                [self.ff(hid_slice ) for hid_slice in norm_hidden_states.chunk(num_chunks , dim=self._chunk_dim )] , dim=self._chunk_dim , )
        else:
            ff_output = self.ff(norm_hidden_states )
        if self.use_ada_layer_norm_zero:
            ff_output = gate_mlp.unsqueeze(1 ) * ff_output
        hidden_states = ff_output + hidden_states
        return hidden_states
class FeedForward( nn.Module ):
    def __init__( self , dim , dim_out = None , mult = 4 , dropout = 0.0 , activation_fn = "geglu" , final_dropout = False , ):
        super().__init__()
        inner_dim = int(dim * mult )
        dim_out = dim_out if dim_out is not None else dim
        if activation_fn == "gelu":
            act_fn = GELU(dim , inner_dim )
        if activation_fn == "gelu-approximate":
            act_fn = GELU(dim , inner_dim , approximate='tanh' )
        elif activation_fn == "geglu":
            act_fn = GEGLU(dim , inner_dim )
        elif activation_fn == "geglu-approximate":
            act_fn = ApproximateGELU(dim , inner_dim )
        self.net = nn.ModuleList([] )
        # project in
        self.net.append(act_fn )
        # project dropout
        self.net.append(nn.Dropout(dropout ) )
        # project out
        self.net.append(nn.Linear(inner_dim , dim_out ) )
        # FF as used in Vision Transformer, MLP-Mixer, etc. have a final dropout
        if final_dropout:
            self.net.append(nn.Dropout(dropout ) )
    def forward( self , hidden_states ):
        for module in self.net:
            hidden_states = module(hidden_states )
        return hidden_states
class GELU( nn.Module ):
    def __init__( self , dim_in , dim_out , approximate = "none" ):
        super().__init__()
        self.proj = nn.Linear(dim_in , dim_out )
        self.approximate = approximate
    def gelu( self , gate ):
        if gate.device.type != "mps":
            return F.gelu(gate , approximate=self.approximate )
        # mps: gelu is not implemented for float16
        return F.gelu(gate.to(dtype=torch.float32 ) , approximate=self.approximate ).to(dtype=gate.dtype )
    def forward( self , hidden_states ):
        hidden_states = self.proj(hidden_states )
        hidden_states = self.gelu(hidden_states )
        return hidden_states
return hidden_states
class GEGLU( nn.Module ):
    def __init__( self , dim_in , dim_out ):
        super().__init__()
        self.proj = nn.Linear(dim_in , dim_out * 2 )
    def gelu( self , gate ):
        if gate.device.type != "mps":
            return F.gelu(gate )
        # mps: gelu is not implemented for float16
        return F.gelu(gate.to(dtype=torch.float32 ) ).to(dtype=gate.dtype )
    def forward( self , hidden_states ):
        hidden_states , gate = self.proj(hidden_states ).chunk(2 , dim=-1 )
        return hidden_states * self.gelu(gate )
class ApproximateGELU( nn.Module ):
    def __init__( self , dim_in , dim_out ):
        super().__init__()
        self.proj = nn.Linear(dim_in , dim_out )
    def forward( self , x ):
        x = self.proj(x )
        # tanh-free approximation from the GELU paper: GELU(x) ≈ x * sigmoid(1.702 * x)
        return x * torch.sigmoid(1.702 * x )
class AdaLayerNorm( nn.Module ):
    def __init__( self , embedding_dim , num_embeddings ):
        super().__init__()
        self.emb = nn.Embedding(num_embeddings , embedding_dim )
        self.silu = nn.SiLU()
        self.linear = nn.Linear(embedding_dim , embedding_dim * 2 )
        self.norm = nn.LayerNorm(embedding_dim , elementwise_affine=False )
    def forward( self , x , timestep ):
        emb = self.linear(self.silu(self.emb(timestep ) ) )
        scale , shift = torch.chunk(emb , 2 )
        x = self.norm(x ) * (1 + scale) + shift
        return x
class AdaLayerNormZero( nn.Module ):
    def __init__( self , embedding_dim , num_embeddings ):
        super().__init__()
        self.emb = CombinedTimestepLabelEmbeddings(num_embeddings , embedding_dim )
        self.silu = nn.SiLU()
        self.linear = nn.Linear(embedding_dim , 6 * embedding_dim , bias=True )
        self.norm = nn.LayerNorm(embedding_dim , elementwise_affine=False , eps=1e-6 )
    def forward( self , x , timestep , class_labels , hidden_dtype=None ):
        emb = self.linear(self.silu(self.emb(timestep , class_labels , hidden_dtype=hidden_dtype ) ) )
        shift_msa , scale_msa , gate_msa , shift_mlp , scale_mlp , gate_mlp = emb.chunk(6 , dim=1 )
        x = self.norm(x ) * (1 + scale_msa[:, None]) + shift_msa[:, None]
        return x, gate_msa, shift_mlp, scale_mlp, gate_mlp
class AdaGroupNorm( nn.Module ):
    def __init__( self , embedding_dim , out_dim , num_groups , act_fn = None , eps = 1e-5 ):
        super().__init__()
        self.num_groups = num_groups
        self.eps = eps
        if act_fn is None:
            self.act = None
        else:
            self.act = get_activation(act_fn )
        self.linear = nn.Linear(embedding_dim , out_dim * 2 )
    def forward( self , x , emb ):
        if self.act:
            emb = self.act(emb )
        emb = self.linear(emb )
        emb = emb[:, :, None, None]
        scale , shift = emb.chunk(2 , dim=1 )
        x = F.group_norm(x , self.num_groups , eps=self.eps )
        # FiLM-style conditioning: the projected embedding scales and shifts the group-normalized activations
        x = x * (1 + scale) + shift
        return x
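# A minimal smoke test of the FeedForward block defined above (shapes are illustrative):
#
#   import torch
#   ff = FeedForward(dim=64, mult=4, activation_fn="geglu")
#   out = ff(torch.randn(2, 10, 64))   # -> torch.Size([2, 10, 64])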
| 329 |
import unittest
import numpy as np
from transformers.testing_utils import is_flaky, require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DonutImageProcessor
class DonutImageProcessingTester( unittest.TestCase ):
    def __init__( self , parent , batch_size=7 , num_channels=3 , image_size=18 , min_resolution=30 , max_resolution=400 , do_resize=True , size=None , do_thumbnail=True , do_align_axis=False , do_pad=True , do_normalize=True , image_mean=[0.5, 0.5, 0.5] , image_std=[0.5, 0.5, 0.5] , ):
        """simple docstring"""
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size if size is not None else {'height': 18, 'width': 20}
        self.do_thumbnail = do_thumbnail
        self.do_align_axis = do_align_axis
        self.do_pad = do_pad
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
    def prepare_image_processor_dict( self ):
"""simple docstring"""
return {
"do_resize": self.do_resize,
"size": self.size,
"do_thumbnail": self.do_thumbnail,
"do_align_long_axis": self.do_align_axis,
"do_pad": self.do_pad,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
}
@require_torch
@require_vision
class DonutImageProcessingTest( ImageProcessingSavingTestMixin , unittest.TestCase ):
    image_processing_class = DonutImageProcessor if is_vision_available() else None
    def setUp( self ):
"""simple docstring"""
_UpperCAmelCase = DonutImageProcessingTester(self )
@property
    def image_processor_dict( self ):
"""simple docstring"""
return self.image_processor_tester.prepare_image_processor_dict()
def UpperCAmelCase__ ( self ) -> Tuple:
"""simple docstring"""
        image_processing = self.image_processing_class(**self.image_processor_dict )
        self.assertTrue(hasattr(image_processing , 'do_resize' ) )
        self.assertTrue(hasattr(image_processing , 'size' ) )
        self.assertTrue(hasattr(image_processing , 'do_thumbnail' ) )
        self.assertTrue(hasattr(image_processing , 'do_align_long_axis' ) )
        self.assertTrue(hasattr(image_processing , 'do_pad' ) )
        self.assertTrue(hasattr(image_processing , 'do_normalize' ) )
        self.assertTrue(hasattr(image_processing , 'image_mean' ) )
        self.assertTrue(hasattr(image_processing , 'image_std' ) )
def UpperCAmelCase__ ( self ) -> Any:
"""simple docstring"""
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict )
        self.assertEqual(image_processor.size , {'height': 18, 'width': 20} )
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict , size=42 )
        self.assertEqual(image_processor.size , {'height': 42, 'width': 42} )
        # Previous config had dimensions in (width, height) order
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict , size=(42, 84) )
self.assertEqual(image_processor.size , {'height': 84, 'width': 42} )
def UpperCAmelCase__ ( self ) -> Optional[Any]:
"""simple docstring"""
pass
@is_flaky()
def UpperCAmelCase__ ( self ) -> Any:
"""simple docstring"""
        image_processing = self.image_processing_class(**self.image_processor_dict )
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester , equal_resolution=False )
        for image in image_inputs:
            self.assertIsInstance(image , Image.Image )
        # Test not batched input
        encoded_images = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) , )
# Test batched
        encoded_images = image_processing(image_inputs , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) , )
@is_flaky()
def UpperCAmelCase__ ( self ) -> str:
"""simple docstring"""
        image_processing = self.image_processing_class(**self.image_processor_dict )
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester , equal_resolution=False , numpify=True )
        for image in image_inputs:
            self.assertIsInstance(image , np.ndarray )
        # Test not batched input
        encoded_images = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) , )
# Test batched
        encoded_images = image_processing(image_inputs , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) , )
@is_flaky()
def UpperCAmelCase__ ( self ) -> int:
"""simple docstring"""
        image_processing = self.image_processing_class(**self.image_processor_dict )
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester , equal_resolution=False , torchify=True )
        for image in image_inputs:
            self.assertIsInstance(image , torch.Tensor )
        # Test not batched input
        encoded_images = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) , )
# Test batched
        encoded_images = image_processing(image_inputs , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) , )
| 329 | 1 |
"""simple docstring"""
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
if is_torch_available():
from transformers import AutoModelForSeqaSeqLM, AutoTokenizer
@require_torch
@require_sentencepiece
@require_tokenizers
class _a ( unittest.TestCase ):
@slow
def snake_case ( self : Optional[Any] ) -> Optional[int]:
'''simple docstring'''
        model = AutoModelForSeqaSeqLM.from_pretrained('''google/mt5-small''', return_dict=True ).to(torch_device )
        tokenizer = AutoTokenizer.from_pretrained('''google/mt5-small''' )
        input_ids = tokenizer('''Hello there''', return_tensors='''pt''' ).input_ids
        labels = tokenizer('''Hi I am''', return_tensors='''pt''' ).input_ids
        loss = model(input_ids.to(torch_device ), labels=labels.to(torch_device ) ).loss
        # loss is the mean token cross-entropy; scaling by the label length recovers the summed
        # negative log-likelihood that the reference score uses
        mtf_score = -(labels.shape[-1] * loss.item())
        EXPECTED_SCORE = -84.9_127
        self.assertTrue(abs(mtf_score - EXPECTED_SCORE ) < 1e-4 )
| 358 |
"""simple docstring"""
from typing import List, Optional, Union
from ...image_utils import ImageInput
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class _a ( ProcessorMixin ):
    attributes = ['''image_processor''', '''tokenizer''']
    image_processor_class = '''BlipImageProcessor'''
    tokenizer_class = '''AutoTokenizer'''
    def __init__( self , image_processor , tokenizer ) -> int:
        '''simple docstring'''
        tokenizer.return_token_type_ids = False
        super().__init__(image_processor, tokenizer )
        self.current_processor = self.image_processor
    def __call__( self , images : ImageInput = None , text : Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None , add_special_tokens : bool = True , padding : Union[bool, str, PaddingStrategy] = False , truncation : Union[bool, str, TruncationStrategy] = None , max_length : Optional[int] = None , stride : int = 0 , pad_to_multiple_of : Optional[int] = None , return_attention_mask : Optional[bool] = None , return_overflowing_tokens : bool = False , return_special_tokens_mask : bool = False , return_offsets_mapping : bool = False , return_token_type_ids : bool = False , return_length : bool = False , verbose : bool = True , return_tensors : Optional[Union[str, TensorType]] = None , **kwargs , ) -> BatchEncoding:
        '''simple docstring'''
        if images is None and text is None:
            raise ValueError('''You have to specify either images or text.''' )
        # Get only text
        if images is None:
            self.current_processor = self.tokenizer
            text_encoding = self.tokenizer(
                text=text , add_special_tokens=add_special_tokens , padding=padding , truncation=truncation , max_length=max_length , stride=stride , pad_to_multiple_of=pad_to_multiple_of , return_attention_mask=return_attention_mask , return_overflowing_tokens=return_overflowing_tokens , return_special_tokens_mask=return_special_tokens_mask , return_offsets_mapping=return_offsets_mapping , return_token_type_ids=return_token_type_ids , return_length=return_length , verbose=verbose , return_tensors=return_tensors , **kwargs , )
            return text_encoding
        # add pixel_values
        encoding_image_processor = self.image_processor(images , return_tensors=return_tensors )
        if text is not None:
            text_encoding = self.tokenizer(
                text=text , add_special_tokens=add_special_tokens , padding=padding , truncation=truncation , max_length=max_length , stride=stride , pad_to_multiple_of=pad_to_multiple_of , return_attention_mask=return_attention_mask , return_overflowing_tokens=return_overflowing_tokens , return_special_tokens_mask=return_special_tokens_mask , return_offsets_mapping=return_offsets_mapping , return_token_type_ids=return_token_type_ids , return_length=return_length , verbose=verbose , return_tensors=return_tensors , **kwargs , )
        else:
            text_encoding = None
        if text_encoding is not None:
            encoding_image_processor.update(text_encoding )
return encoding_image_processor
    def batch_decode( self : List[Any], *args : List[str], **kwargs : str ) -> Optional[int]:
        '''simple docstring'''
        return self.tokenizer.batch_decode(*args, **kwargs )
    def decode( self : List[Any], *args : Dict, **kwargs : Any ) -> List[str]:
        '''simple docstring'''
        return self.tokenizer.decode(*args, **kwargs )
@property
# Copied from transformers.models.blip.processing_blip.BlipProcessor.model_input_names
    def model_input_names( self : Any ) -> str:
        '''simple docstring'''
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
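# Illustrative usage sketch (not part of the original file; this looks like a
# BLIP-2-style processor, and the checkpoint name below is an assumption):
#
#   from transformers import Blip2Processor
#   processor = Blip2Processor.from_pretrained("Salesforce/blip2-opt-2.7b")
#   inputs = processor(images=image, text="a photo of", return_tensors="pt")
#   # -> BatchEncoding with pixel_values plus input_ids/attention_mask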
| 128 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import _LazyModule
_import_structure = {'tokenization_byt5': ['ByT5Tokenizer']}
if TYPE_CHECKING:
    from .tokenization_byt5 import ByT5Tokenizer
else:
    import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
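# Note (not in the original file): _LazyModule defers importing submodules until
# an attribute is first accessed. A minimal sketch of the idea, all names
# illustrative rather than the real implementation:
#
#   import importlib, types
#
#   class LazyModule(types.ModuleType):
#       def __init__(self, name, import_structure):
#           super().__init__(name)
#           self._class_to_module = {
#               cls: mod for mod, classes in import_structure.items() for cls in classes
#           }
#
#       def __getattr__(self, name):  # only called when normal lookup fails
#           module = importlib.import_module("." + self._class_to_module[name], self.__name__)
#           return getattr(module, name)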
| 63 |
'''simple docstring'''
import warnings
from ...utils import logging
from .image_processing_beit import BeitImageProcessor
UpperCamelCase = logging.get_logger(__name__)
class lowerCAmelCase_ ( UpperCAmelCase_ ):
'''simple docstring'''
def __init__( self : Union[str, Any] , *SCREAMING_SNAKE_CASE_ : List[str] , **SCREAMING_SNAKE_CASE_ : Union[str, Any] ) -> None:
'''simple docstring'''
warnings.warn(
'''The class BeitFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please'''
''' use BeitImageProcessor instead.''' , FutureWarning , )
super().__init__(*SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ )
| 319 | 0 |
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import XLMRobertaTokenizerFast
from diffusers import DDIMScheduler, KandinskyInpaintPipeline, KandinskyPriorPipeline, UNetaDConditionModel, VQModel
from diffusers.pipelines.kandinsky.text_encoder import MCLIPConfig, MultilingualCLIP
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class snake_case_ (lowerCamelCase_ , unittest.TestCase ):
UpperCAmelCase__ : Any = KandinskyInpaintPipeline
UpperCAmelCase__ : Optional[int] = ['''prompt''', '''image_embeds''', '''negative_image_embeds''', '''image''', '''mask_image''']
UpperCAmelCase__ : str = [
'''prompt''',
'''negative_prompt''',
'''image_embeds''',
'''negative_image_embeds''',
'''image''',
'''mask_image''',
]
UpperCAmelCase__ : Optional[int] = [
'''generator''',
'''height''',
'''width''',
'''latents''',
'''guidance_scale''',
'''negative_prompt''',
'''num_inference_steps''',
'''return_dict''',
'''guidance_scale''',
'''num_images_per_prompt''',
'''output_type''',
'''return_dict''',
]
UpperCAmelCase__ : List[str] = False
@property
def lowerCamelCase__( self :Any ) -> Tuple:
return 32
@property
def lowerCamelCase__( self :List[Any] ) -> Optional[Any]:
return 32
@property
def lowerCamelCase__( self :Tuple ) -> List[str]:
return self.time_input_dim
@property
def lowerCamelCase__( self :int ) -> str:
return self.time_input_dim * 4
@property
def lowerCamelCase__( self :Tuple ) -> Tuple:
return 1_00
@property
def lowerCamelCase__( self :int ) -> Tuple:
a__ = XLMRobertaTokenizerFast.from_pretrained('YiYiXu/tiny-random-mclip-base' )
return tokenizer
@property
def lowerCamelCase__( self :str ) -> List[str]:
torch.manual_seed(0 )
a__ = MCLIPConfig(
numDims=self.cross_attention_dim ,transformerDimensions=self.text_embedder_hidden_size ,hidden_size=self.text_embedder_hidden_size ,intermediate_size=37 ,num_attention_heads=4 ,num_hidden_layers=5 ,vocab_size=10_05 ,)
a__ = MultilingualCLIP(__snake_case )
a__ = text_encoder.eval()
return text_encoder
@property
def lowerCamelCase__( self :Dict ) -> List[str]:
torch.manual_seed(0 )
a__ = {
'in_channels': 9,
            # Out channels is double the in channels because the model predicts both mean and variance
'out_channels': 8,
'addition_embed_type': 'text_image',
'down_block_types': ('ResnetDownsampleBlock2D', 'SimpleCrossAttnDownBlock2D'),
'up_block_types': ('SimpleCrossAttnUpBlock2D', 'ResnetUpsampleBlock2D'),
'mid_block_type': 'UNetMidBlock2DSimpleCrossAttn',
'block_out_channels': (self.block_out_channels_a, self.block_out_channels_a * 2),
'layers_per_block': 1,
'encoder_hid_dim': self.text_embedder_hidden_size,
'encoder_hid_dim_type': 'text_image_proj',
'cross_attention_dim': self.cross_attention_dim,
'attention_head_dim': 4,
'resnet_time_scale_shift': 'scale_shift',
'class_embed_type': None,
}
a__ = UNetaDConditionModel(**__snake_case )
return model
@property
def lowerCamelCase__( self :Any ) -> Any:
return {
"block_out_channels": [32, 64],
"down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"],
"in_channels": 3,
"latent_channels": 4,
"layers_per_block": 1,
"norm_num_groups": 8,
"norm_type": "spatial",
"num_vq_embeddings": 12,
"out_channels": 3,
"up_block_types": [
"AttnUpDecoderBlock2D",
"UpDecoderBlock2D",
],
"vq_embed_dim": 4,
}
@property
def lowerCamelCase__( self :Optional[int] ) -> Union[str, Any]:
torch.manual_seed(0 )
a__ = VQModel(**self.dummy_movq_kwargs )
return model
def lowerCamelCase__( self :Optional[int] ) -> List[Any]:
a__ = self.dummy_text_encoder
a__ = self.dummy_tokenizer
a__ = self.dummy_unet
a__ = self.dummy_movq
a__ = DDIMScheduler(
num_train_timesteps=10_00 ,beta_schedule='linear' ,beta_start=0.0_00_85 ,beta_end=0.0_12 ,clip_sample=__snake_case ,set_alpha_to_one=__snake_case ,steps_offset=1 ,prediction_type='epsilon' ,thresholding=__snake_case ,)
a__ = {
'text_encoder': text_encoder,
'tokenizer': tokenizer,
'unet': unet,
'scheduler': scheduler,
'movq': movq,
}
return components
def lowerCamelCase__( self :Optional[Any] ,__snake_case :Optional[int] ,__snake_case :Tuple=0 ) -> Optional[int]:
a__ = floats_tensor((1, self.cross_attention_dim) ,rng=random.Random(__snake_case ) ).to(__snake_case )
a__ = floats_tensor((1, self.cross_attention_dim) ,rng=random.Random(seed + 1 ) ).to(__snake_case )
# create init_image
a__ = floats_tensor((1, 3, 64, 64) ,rng=random.Random(__snake_case ) ).to(__snake_case )
a__ = image.cpu().permute(0 ,2 ,3 ,1 )[0]
a__ = Image.fromarray(np.uinta(__snake_case ) ).convert('RGB' ).resize((2_56, 2_56) )
# create mask
a__ = np.ones((64, 64) ,dtype=np.floataa )
a__ = 0
if str(__snake_case ).startswith('mps' ):
a__ = torch.manual_seed(__snake_case )
else:
a__ = torch.Generator(device=__snake_case ).manual_seed(__snake_case )
a__ = {
'prompt': 'horse',
'image': init_image,
'mask_image': mask,
'image_embeds': image_embeds,
'negative_image_embeds': negative_image_embeds,
'generator': generator,
'height': 64,
'width': 64,
'num_inference_steps': 2,
'guidance_scale': 4.0,
'output_type': 'np',
}
return inputs
def lowerCamelCase__( self :int ) -> Optional[Any]:
a__ = 'cpu'
a__ = self.get_dummy_components()
a__ = self.pipeline_class(**__snake_case )
a__ = pipe.to(__snake_case )
pipe.set_progress_bar_config(disable=__snake_case )
a__ = pipe(**self.get_dummy_inputs(__snake_case ) )
a__ = output.images
a__ = pipe(
**self.get_dummy_inputs(__snake_case ) ,return_dict=__snake_case ,)[0]
a__ = image[0, -3:, -3:, -1]
a__ = image_from_tuple[0, -3:, -3:, -1]
print(F'image.shape {image.shape}' )
assert image.shape == (1, 64, 64, 3)
a__ = np.array(
[0.8_32_69_19, 0.73_79_04_67, 0.20_91_85_81, 0.9_30_96_12, 0.5_51_17_91, 0.43_71_33_28, 0.5_51_33_21, 0.49_92_29_34, 0.59_49_77_86] )
assert (
np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
), F' expected_slice {expected_slice}, but got {image_slice.flatten()}'
assert (
np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
), F' expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}'
def lowerCamelCase__( self :List[Any] ) -> int:
super().test_inference_batch_single_identical(expected_max_diff=3E-3 )
@slow
@require_torch_gpu
class snake_case_ (unittest.TestCase ):
def lowerCamelCase__( self :Optional[Any] ) -> Optional[int]:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def lowerCamelCase__( self :int ) -> int:
a__ = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/kandinsky/kandinsky_inpaint_cat_with_hat_fp16.npy' )
a__ = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main' '/kandinsky/cat.png' )
a__ = np.ones((7_68, 7_68) ,dtype=np.floataa )
a__ = 0
a__ = 'a hat'
a__ = KandinskyPriorPipeline.from_pretrained(
'kandinsky-community/kandinsky-2-1-prior' ,torch_dtype=torch.floataa )
pipe_prior.to(__snake_case )
a__ = KandinskyInpaintPipeline.from_pretrained(
'kandinsky-community/kandinsky-2-1-inpaint' ,torch_dtype=torch.floataa )
a__ = pipeline.to(__snake_case )
pipeline.set_progress_bar_config(disable=__snake_case )
a__ = torch.Generator(device='cpu' ).manual_seed(0 )
a__ , a__ = pipe_prior(
__snake_case ,generator=__snake_case ,num_inference_steps=5 ,negative_prompt='' ,).to_tuple()
a__ = pipeline(
__snake_case ,image=__snake_case ,mask_image=__snake_case ,image_embeds=__snake_case ,negative_image_embeds=__snake_case ,generator=__snake_case ,num_inference_steps=1_00 ,height=7_68 ,width=7_68 ,output_type='np' ,)
a__ = output.images[0]
assert image.shape == (7_68, 7_68, 3)
assert_mean_pixel_difference(__snake_case ,__snake_case )
| 352 |
from __future__ import annotations
class Node:
    def __init__(self, data: int) -> None:
        self.data = data
        self.left: Node | None = None
        self.right: Node | None = None
def display(tree: Node | None) -> None:  # In-order traversal of the tree
if tree:
display(tree.left )
print(tree.data )
display(tree.right )
def depth_of_tree(tree: Node | None) -> int:
return 1 + max(depth_of_tree(tree.left ) , depth_of_tree(tree.right ) ) if tree else 0
def is_full_binary_tree(tree: Node) -> bool:
if not tree:
return True
if tree.left and tree.right:
return is_full_binary_tree(tree.left ) and is_full_binary_tree(tree.right )
else:
return not tree.left and not tree.right
def main() -> None:  # Main function for testing.
    # The attachment points of these nodes were lost in extraction; the tree
    # below is a reconstruction that exercises all three helpers.
    tree = Node(1)
    tree.left = Node(2)
    tree.right = Node(3)
    tree.left.left = Node(4)
    tree.left.right = Node(5)
    tree.left.right.left = Node(6)
    tree.right.left = Node(7)
    tree.right.left.left = Node(8)
    tree.left.right.right = Node(9)
    print(is_full_binary_tree(tree))
    print(depth_of_tree(tree))
    print('Tree is: ')
    display(tree)
if __name__ == "__main__":
main()
| 109 | 0 |
__A = {
"Pillow": "Pillow<10.0.0",
"accelerate": "accelerate>=0.20.3",
"av": "av==9.2.0",
"beautifulsoup4": "beautifulsoup4",
"black": "black~=23.1",
"codecarbon": "codecarbon==1.2.0",
"cookiecutter": "cookiecutter==1.7.3",
"dataclasses": "dataclasses",
"datasets": "datasets!=2.5.0",
"decord": "decord==0.6.0",
"deepspeed": "deepspeed>=0.9.3",
"diffusers": "diffusers",
"dill": "dill<0.3.5",
"evaluate": "evaluate>=0.2.0",
"fairscale": "fairscale>0.3",
"faiss-cpu": "faiss-cpu",
"fastapi": "fastapi",
"filelock": "filelock",
"flax": "flax>=0.4.1,<=0.7.0",
"ftfy": "ftfy",
"fugashi": "fugashi>=1.0",
"GitPython": "GitPython<3.1.19",
"hf-doc-builder": "hf-doc-builder>=0.3.0",
"huggingface-hub": "huggingface-hub>=0.14.1,<1.0",
"importlib_metadata": "importlib_metadata",
"ipadic": "ipadic>=1.0.0,<2.0",
"isort": "isort>=5.5.4",
"jax": "jax>=0.2.8,!=0.3.2,<=0.4.13",
"jaxlib": "jaxlib>=0.1.65,<=0.4.13",
"jieba": "jieba",
"kenlm": "kenlm",
"keras-nlp": "keras-nlp>=0.3.1",
"librosa": "librosa",
"nltk": "nltk",
"natten": "natten>=0.14.6",
"numpy": "numpy>=1.17",
"onnxconverter-common": "onnxconverter-common",
"onnxruntime-tools": "onnxruntime-tools>=1.4.2",
"onnxruntime": "onnxruntime>=1.4.0",
"opencv-python": "opencv-python",
"optuna": "optuna",
"optax": "optax>=0.0.8,<=0.1.4",
"packaging": "packaging>=20.0",
"parameterized": "parameterized",
"phonemizer": "phonemizer",
"protobuf": "protobuf",
"psutil": "psutil",
"pyyaml": "pyyaml>=5.1",
"pydantic": "pydantic<2",
"pytest": "pytest>=7.2.0",
"pytest-timeout": "pytest-timeout",
"pytest-xdist": "pytest-xdist",
"python": "python>=3.8.0",
"ray[tune]": "ray[tune]",
"regex": "regex!=2019.12.17",
"requests": "requests",
"rhoknp": "rhoknp>=1.1.0,<1.3.1",
"rjieba": "rjieba",
"rouge-score": "rouge-score!=0.0.7,!=0.0.8,!=0.1,!=0.1.1",
"ruff": "ruff>=0.0.241,<=0.0.259",
"sacrebleu": "sacrebleu>=1.4.12,<2.0.0",
"sacremoses": "sacremoses",
"safetensors": "safetensors>=0.3.1",
"sagemaker": "sagemaker>=2.31.0",
"scikit-learn": "scikit-learn",
"sentencepiece": "sentencepiece>=0.1.91,!=0.1.92",
"sigopt": "sigopt",
"starlette": "starlette",
"sudachipy": "sudachipy>=0.6.6",
"sudachidict_core": "sudachidict_core>=20220729",
"tensorflow-cpu": "tensorflow-cpu>=2.6,<2.14",
"tensorflow": "tensorflow>=2.6,<2.14",
"tensorflow-text": "tensorflow-text<2.14",
"tf2onnx": "tf2onnx",
"timeout-decorator": "timeout-decorator",
"timm": "timm",
"tokenizers": "tokenizers>=0.11.1,!=0.11.3,<0.14",
"torch": "torch>=1.9,!=1.12.0",
"torchaudio": "torchaudio",
"torchvision": "torchvision",
"pyctcdecode": "pyctcdecode>=0.4.0",
"tqdm": "tqdm>=4.27",
"unidic": "unidic>=1.0.2",
"unidic_lite": "unidic_lite>=1.0.7",
"urllib3": "urllib3<2.0.0",
"uvicorn": "uvicorn",
}
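# Illustrative lookup (not in the original file): the table above maps a bare
# package name to its pinned requirement string, e.g. "torch" -> "torch>=1.9,!=1.12.0",
# and is consumed by the setup machinery when assembling install_requires/extras.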
| 90 |
'''simple docstring'''
from typing import Any, Dict, List, Union
from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging, requires_backends
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from ..image_utils import load_image
if is_torch_available():
import torch
from ..models.auto.modeling_auto import MODEL_FOR_OBJECT_DETECTION_MAPPING, MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING
UpperCamelCase = logging.get_logger(__name__)
UpperCamelCase = Dict[str, Any]
UpperCamelCase = List[Prediction]
@add_end_docstrings(UpperCAmelCase_ )
class lowerCAmelCase_ ( UpperCAmelCase_ ):
'''simple docstring'''
    def __init__( self : Union[str, Any] , *args : Union[str, Any] , **kwargs : List[str] ) -> int:
        '''simple docstring'''
        super().__init__(*args , **kwargs )
if self.framework == "tf":
raise ValueError(f"""The {self.__class__} is only available in PyTorch.""" )
requires_backends(self , '''vision''' )
self.check_model_type(
dict(MODEL_FOR_OBJECT_DETECTION_MAPPING.items() + MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING.items() ) )
    def _sanitize_parameters( self : int , **kwargs : Optional[int] ) -> Optional[Any]:
        '''simple docstring'''
        postprocess_kwargs = {}
        if "threshold" in kwargs:
            postprocess_kwargs['''threshold'''] = kwargs['''threshold''']
        return {}, {}, postprocess_kwargs
def __call__( self : str , *SCREAMING_SNAKE_CASE_ : str , **SCREAMING_SNAKE_CASE_ : Optional[Any] ) -> Union[Predictions, List[Prediction]]:
'''simple docstring'''
return super().__call__(*SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ )
    def preprocess( self : Optional[int] , image : Optional[int] ) -> Union[str, Any]:
        '''simple docstring'''
        image = load_image(image )
        target_size = torch.IntTensor([[image.height, image.width]] )
        inputs = self.image_processor(images=[image] , return_tensors='''pt''' )
        if self.tokenizer is not None:
            inputs = self.tokenizer(text=inputs['''words'''] , boxes=inputs['''boxes'''] , return_tensors='''pt''' )
        inputs['''target_size'''] = target_size
        return inputs
    def _forward( self : int , model_inputs : str ) -> List[Any]:
        '''simple docstring'''
        target_size = model_inputs.pop('''target_size''' )
        outputs = self.model(**model_inputs )
        model_outputs = outputs.__class__({'''target_size''': target_size, **outputs} )
        if self.tokenizer is not None:
            model_outputs['''bbox'''] = model_inputs['''bbox''']
        return model_outputs
    def postprocess( self : Any , model_outputs : Union[str, Any] , threshold : float=0.9 ) -> Union[str, Any]:
        '''simple docstring'''
        target_size = model_outputs['''target_size''']
        if self.tokenizer is not None:
            # This is a LayoutLMForTokenClassification variant.
            # The OCR got the boxes and the model classified the words.
            height , width = target_size[0].tolist()

            def unnormalize(bbox : str ):
                return self._get_bounding_box(
                    torch.Tensor(
                        [
                            (width * bbox[0] / 10_00),
                            (height * bbox[1] / 10_00),
                            (width * bbox[2] / 10_00),
                            (height * bbox[3] / 10_00),
                        ] ) )

            scores , classes = model_outputs['''logits'''].squeeze(0 ).softmax(dim=-1 ).max(dim=-1 )
            labels = [self.model.config.id2label[prediction] for prediction in classes.tolist()]
            boxes = [unnormalize(bbox ) for bbox in model_outputs['''bbox'''].squeeze(0 )]
            keys = ['''score''', '''label''', '''box''']
            annotation = [dict(zip(keys , vals ) ) for vals in zip(scores.tolist() , labels , boxes ) if vals[0] > threshold]
        else:
            # This is a regular ForObjectDetectionModel
            raw_annotations = self.image_processor.post_process_object_detection(model_outputs , threshold , target_size )
            raw_annotation = raw_annotations[0]
            scores = raw_annotation['''scores''']
            labels = raw_annotation['''labels''']
            boxes = raw_annotation['''boxes''']
            raw_annotation['''scores'''] = scores.tolist()
            raw_annotation['''labels'''] = [self.model.config.id2label[label.item()] for label in labels]
            raw_annotation['''boxes'''] = [self._get_bounding_box(box ) for box in boxes]
            # {"scores": [...], ...} --> [{"score":x, ...}, ...]
            keys = ['''score''', '''label''', '''box''']
            annotation = [
                dict(zip(keys , vals ) )
                for vals in zip(raw_annotation['''scores'''] , raw_annotation['''labels'''] , raw_annotation['''boxes'''] )
            ]
        return annotation
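        # Worked example (a note, not in the original file): with a 640x480 image,
        # the normalized box [100, 200, 500, 600] (thousandths of width/height)
        # maps to pixel corners xmin=64.0, ymin=96.0, xmax=320.0, ymax=288.0
        # via unnormalize above.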
def _snake_case ( self : Tuple , SCREAMING_SNAKE_CASE_ : "torch.Tensor" ) -> Dict[str, int]:
'''simple docstring'''
if self.framework != "pt":
raise ValueError('''The ObjectDetectionPipeline is only available in PyTorch.''' )
A , A , A , A: str = box.int().tolist()
A: str = {
'''xmin''': xmin,
'''ymin''': ymin,
'''xmax''': xmax,
'''ymax''': ymax,
}
return bbox
| 319 | 0 |
"""simple docstring"""
def hubble_parameter( hubble_constant : float , radiation_density : float , matter_density : float , dark_energy : float , redshift : float , ) -> float:
    """simple docstring"""
    parameters = [redshift, radiation_density, matter_density, dark_energy]
    if any(p < 0 for p in parameters ):
        raise ValueError('''All input parameters must be positive''' )
    if any(p > 1 for p in parameters[1:4] ):
        raise ValueError('''Relative densities cannot be greater than one''' )
    else:
        curvature = 1 - (matter_density + radiation_density + dark_energy)
        e_2 = (
            radiation_density * (redshift + 1) ** 4
            + matter_density * (redshift + 1) ** 3
            + curvature * (redshift + 1) ** 2
            + dark_energy
        )
        hubble = hubble_constant * e_2 ** (1 / 2)
        return hubble
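# The function above evaluates the Friedmann equation in the form
#   H(z) = H0 * E(z),
#   E(z)^2 = Omega_r*(1+z)**4 + Omega_m*(1+z)**3 + Omega_k*(1+z)**2 + Omega_Lambda,
# with the curvature term Omega_k = 1 - (Omega_m + Omega_r + Omega_Lambda).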
if __name__ == "__main__":
import doctest
# run doctest
doctest.testmod()
# demo LCDM approximation
    matter_density = 0.3
print(
hubble_parameter(
hubble_constant=68.3,
radiation_density=1E-4,
matter_density=matter_density,
dark_energy=1 - matter_density,
redshift=0,
)
)
| 361 |
"""simple docstring"""
from collections.abc import Callable
def bisection(function: Callable[[float], float], a: float, b: float) -> float:
    """simple docstring"""
    start: float = a
    end: float = b
    if function(a) == 0:  # one of a or b is already a root of the function
        return a
    elif function(b) == 0:
        return b
    elif (
        function(a) * function(b) > 0
    ):  # if neither endpoint is a root and f(a) and f(b) share a sign,
        # then this algorithm can't find the root
        raise ValueError('''could not find root in given interval.''')
    else:
        mid: float = start + (end - start) / 2.0
        while abs(start - mid) > 10**-7:  # until the bracket is narrower than 10^-7
            if function(mid) == 0:
                return mid
            elif function(mid) * function(start) < 0:
                end = mid
            else:
                start = mid
            mid = start + (end - start) / 2.0
        return mid


def f(x: float) -> float:
    """simple docstring"""
    return x**3 - 2 * x - 5
if __name__ == "__main__":
print(bisection(f, 1, 1000))
import doctest
doctest.testmod()
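    # Illustrative check (not in the original file): f(1) = -6 and f(1000) > 0,
    # so the interval brackets the real root of x**3 - 2*x - 5 at x ≈ 2.0945515.
    assert abs(bisection(f, 1, 1000) - 2.0945515) < 1e-4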
| 24 | 0 |
"""simple docstring"""
import logging
import os
from typing import List, Tuple
import numpy as np
import psutil
import torch
import torch.distributed as dist
from transformers import RagRetriever
a :List[str] = logging.getLogger(__name__)
class __a (UpperCamelCase_):
'''simple docstring'''
def __init__( self , _a , _a , _a , _a=None ) -> int:
"""simple docstring"""
super().__init__(
_a , question_encoder_tokenizer=_a , generator_tokenizer=_a , index=_a , init_retrieval=_a , )
SCREAMING_SNAKE_CASE__ : List[str] = None
def _a ( self , _a ) -> int:
"""simple docstring"""
logger.info("""initializing retrieval""" )
# initializing a separate process group for retrieval as the default
# nccl backend doesn't support gather/scatter operations while gloo
# is too slow to replace nccl for the core gpu communication
if dist.is_initialized():
logger.info("""dist initialized""" )
# needs to be set manually
SCREAMING_SNAKE_CASE__ : Dict = self._infer_socket_ifname()
# avoid clash with the NCCL port
SCREAMING_SNAKE_CASE__ : Tuple = str(distributed_port + 1 )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = dist.new_group(ranks=_a , backend="""gloo""" )
# initialize retriever only on the main worker
if not dist.is_initialized() or self._is_main():
logger.info("""dist not initialized / main""" )
self.index.init_index()
        # all processes wait until the retriever is initialized by the main process
if dist.is_initialized():
torch.distributed.barrier(group=self.process_group )
def _a ( self ) -> Any:
"""simple docstring"""
return dist.get_rank(group=self.process_group ) == 0
def _a ( self , _a , _a , _a=torch.floataa ) -> List[str]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : List[Any] = torch.empty(_a , dtype=_a )
dist.scatter(_a , src=0 , scatter_list=_a , group=self.process_group )
return target_tensor
def _a ( self ) -> Any:
"""simple docstring"""
        addrs = psutil.net_if_addrs()
        # a hacky way to deal with varying network interface names
        ifname = next((addr for addr in addrs if addr.startswith("""e""" )) , None )
        return ifname
def _a ( self , _a , _a ) -> Tuple[np.ndarray, List[dict]]:
"""simple docstring"""
if not dist.is_initialized():
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Tuple = self._main_retrieve(_a , _a )
return retrieved_doc_embeds, doc_ids, self.index.get_doc_dicts(_a )
# distributed training
SCREAMING_SNAKE_CASE__ : Optional[Any] = dist.get_world_size(group=self.process_group )
# gather logic
SCREAMING_SNAKE_CASE__ : Optional[int] = None
if self._is_main():
SCREAMING_SNAKE_CASE__ : Union[str, Any] = [torch.empty(question_hidden_states.shape , dtype=torch.floataa ) for _ in range(_a )]
dist.gather(torch.tensor(_a ) , dst=0 , gather_list=_a , group=self.process_group )
# scatter logic
SCREAMING_SNAKE_CASE__ : Any = question_hidden_states.shape[0]
SCREAMING_SNAKE_CASE__ : str = []
SCREAMING_SNAKE_CASE__ : List[Any] = []
if self._is_main():
assert len(_a ) == world_size
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : str = self._main_retrieve(torch.cat(_a ).numpy() , _a )
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Dict = torch.tensor(_a ), torch.tensor(_a )
SCREAMING_SNAKE_CASE__ : str = self._chunk_tensor(_a , _a )
SCREAMING_SNAKE_CASE__ : Optional[Any] = self._chunk_tensor(_a , _a )
SCREAMING_SNAKE_CASE__ : Optional[int] = self._scattered(_a , [n_queries, n_docs] , target_type=torch.intaa )
SCREAMING_SNAKE_CASE__ : List[str] = self._scattered(_a , [n_queries, n_docs, question_hidden_states.shape[1]] )
return retrieved_doc_embeds.numpy(), doc_ids.numpy(), self.index.get_doc_dicts(_a )
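    # Summary (a note, not in the original file): each worker gathers its question
    # hidden states to rank 0, rank 0 alone queries the index, and the resulting
    # doc ids/embeddings are chunked and scattered back, so only the main process
    # has to hold the retrieval index in memory.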
| 132 |
"""simple docstring"""
from pathlib import Path
from typing import List
from transformers import is_torch_available, is_vision_available
from transformers.testing_utils import get_tests_dir, is_tool_test
from transformers.tools.agent_types import AGENT_TYPE_MAPPING, AgentAudio, AgentImage, AgentText
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
a :Optional[int] = ["text", "image", "audio"]
def create_inputs(input_types) -> List[Any]:
    inputs = []
for input_type in input_types:
if input_type == "text":
inputs.append("""Text input""" )
elif input_type == "image":
inputs.append(
Image.open(Path(get_tests_dir("""fixtures/tests_samples/COCO""" ) ) / """000000039769.png""" ).resize((512, 512) ) )
elif input_type == "audio":
inputs.append(torch.ones(3000 ) )
        elif isinstance(input_type , list ):
            inputs.append(create_inputs(input_type ) )
else:
raise ValueError(F'''Invalid type requested: {input_type}''' )
return inputs
def output_types(outputs) -> List[str]:
    output_types = []
for output in outputs:
if isinstance(__lowerCAmelCase , (str, AgentText) ):
output_types.append("""text""" )
elif isinstance(__lowerCAmelCase , (Image.Image, AgentImage) ):
output_types.append("""image""" )
elif isinstance(__lowerCAmelCase , (torch.Tensor, AgentAudio) ):
output_types.append("""audio""" )
else:
raise ValueError(F'''Invalid output: {output}''' )
return output_types
@is_tool_test
class __a :
'''simple docstring'''
def _a ( self ) -> str:
"""simple docstring"""
self.assertTrue(hasattr(self.tool , """inputs""" ) )
self.assertTrue(hasattr(self.tool , """outputs""" ) )
SCREAMING_SNAKE_CASE__ : List[Any] = self.tool.inputs
for _input in inputs:
if isinstance(_input , _a ):
for __input in _input:
self.assertTrue(__input in authorized_types )
else:
self.assertTrue(_input in authorized_types )
SCREAMING_SNAKE_CASE__ : Dict = self.tool.outputs
for _output in outputs:
self.assertTrue(_output in authorized_types )
def _a ( self ) -> List[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Tuple = create_inputs(self.tool.inputs )
SCREAMING_SNAKE_CASE__ : Optional[Any] = self.tool(*_a )
# There is a single output
if len(self.tool.outputs ) == 1:
SCREAMING_SNAKE_CASE__ : List[Any] = [outputs]
self.assertListEqual(output_types(_a ) , self.tool.outputs )
def _a ( self ) -> List[Any]:
"""simple docstring"""
self.assertTrue(hasattr(self.tool , """description""" ) )
self.assertTrue(hasattr(self.tool , """default_checkpoint""" ) )
self.assertTrue(self.tool.description.startswith("""This is a tool that""" ) )
def _a ( self ) -> Union[str, Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Dict = create_inputs(self.tool.inputs )
SCREAMING_SNAKE_CASE__ : Dict = self.tool(*_a )
if not isinstance(_a , _a ):
SCREAMING_SNAKE_CASE__ : Union[str, Any] = [outputs]
self.assertEqual(len(_a ) , len(self.tool.outputs ) )
for output, output_type in zip(_a , self.tool.outputs ):
SCREAMING_SNAKE_CASE__ : Optional[Any] = AGENT_TYPE_MAPPING[output_type]
self.assertTrue(isinstance(_a , _a ) )
def _a ( self ) -> Optional[int]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Dict = create_inputs(self.tool.inputs )
SCREAMING_SNAKE_CASE__ : List[Any] = []
for _input, input_type in zip(_a , self.tool.inputs ):
if isinstance(_a , _a ):
_inputs.append([AGENT_TYPE_MAPPING[_input_type](_input ) for _input_type in input_type] )
else:
_inputs.append(AGENT_TYPE_MAPPING[input_type](_input ) )
# Should not raise an error
SCREAMING_SNAKE_CASE__ : Union[str, Any] = self.tool(*_a )
if not isinstance(_a , _a ):
SCREAMING_SNAKE_CASE__ : Optional[Any] = [outputs]
self.assertEqual(len(_a ) , len(self.tool.outputs ) )
| 132 | 1 |
'''simple docstring'''
import json
import os
import subprocess
import unittest
from ast import literal_eval
import pytest
from parameterized import parameterized, parameterized_class
from . import is_sagemaker_available
if is_sagemaker_available():
from sagemaker import Session, TrainingJobAnalytics
from sagemaker.huggingface import HuggingFace
@pytest.mark.skipif(
literal_eval(os.getenv('''TEST_SAGEMAKER''' ,'''False''' ) ) is not True ,reason='''Skipping test because should only be run when releasing minor transformers version''' ,)
@pytest.mark.usefixtures('''sm_env''' )
@parameterized_class(
[
{
'''framework''': '''pytorch''',
'''script''': '''run_glue.py''',
'''model_name_or_path''': '''distilbert-base-cased''',
'''instance_type''': '''ml.p3.16xlarge''',
'''results''': {'''train_runtime''': 650, '''eval_accuracy''': 0.7, '''eval_loss''': 0.6},
},
{
'''framework''': '''pytorch''',
'''script''': '''run_ddp.py''',
'''model_name_or_path''': '''distilbert-base-cased''',
'''instance_type''': '''ml.p3.16xlarge''',
'''results''': {'''train_runtime''': 600, '''eval_accuracy''': 0.7, '''eval_loss''': 0.6},
},
{
'''framework''': '''tensorflow''',
'''script''': '''run_tf_dist.py''',
'''model_name_or_path''': '''distilbert-base-cased''',
'''instance_type''': '''ml.p3.16xlarge''',
'''results''': {'''train_runtime''': 600, '''eval_accuracy''': 0.6, '''eval_loss''': 0.7},
},
] )
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
"""simple docstring"""
def lowerCamelCase_ ( self : Optional[Any] ):
"""simple docstring"""
if self.framework == "pytorch":
subprocess.run(
f"cp ./examples/pytorch/text-classification/run_glue.py {self.env.test_path}/run_glue.py".split() , encoding="utf-8" , check=UpperCAmelCase_ , )
assert hasattr(self , "env" )
def lowerCamelCase_ ( self : str , UpperCAmelCase_ : List[str] ):
"""simple docstring"""
__UpperCAmelCase : Union[str, Any] = f"{self.env.base_job_name}-{instance_count}-{'ddp' if 'ddp' in self.script else 'smd'}"
# distributed data settings
__UpperCAmelCase : Optional[int] = {"smdistributed": {"dataparallel": {"enabled": True}}} if self.script != "run_ddp.py" else None
# creates estimator
return HuggingFace(
entry_point=self.script , source_dir=self.env.test_path , role=self.env.role , image_uri=self.env.image_uri , base_job_name=UpperCAmelCase_ , instance_count=UpperCAmelCase_ , instance_type=self.instance_type , debugger_hook_config=UpperCAmelCase_ , hyperparameters={**self.env.distributed_hyperparameters, "model_name_or_path": self.model_name_or_path} , metric_definitions=self.env.metric_definitions , distribution=UpperCAmelCase_ , py_version="py36" , )
def lowerCamelCase_ ( self : Optional[Any] , UpperCAmelCase_ : Union[str, Any] ):
"""simple docstring"""
TrainingJobAnalytics(UpperCAmelCase_ ).export_csv(f"{self.env.test_path}/{job_name}_metrics.csv" )
@parameterized.expand([(2,)] )
def lowerCamelCase_ ( self : Optional[Any] , UpperCAmelCase_ : Tuple ):
"""simple docstring"""
# create estimator
__UpperCAmelCase : List[Any] = self.create_estimator(UpperCAmelCase_ )
# run training
estimator.fit()
# result dataframe
__UpperCAmelCase : Optional[Any] = TrainingJobAnalytics(estimator.latest_training_job.name ).dataframe()
# extract kpis
__UpperCAmelCase : str = list(result_metrics_df[result_metrics_df.metric_name == "eval_accuracy"]["value"] )
__UpperCAmelCase : Tuple = list(result_metrics_df[result_metrics_df.metric_name == "eval_loss"]["value"] )
        # get train time from the SageMaker job; this includes starting, preprocessing, and stopping
__UpperCAmelCase : str = (
Session().describe_training_job(estimator.latest_training_job.name ).get("TrainingTimeInSeconds" , 999_999 )
)
# assert kpis
assert train_runtime <= self.results["train_runtime"]
assert all(t >= self.results["eval_accuracy"] for t in eval_accuracy )
assert all(t <= self.results["eval_loss"] for t in eval_loss )
# dump tests result into json file to share in PR
with open(f"{estimator.latest_training_job.name}.json" , "w" ) as outfile:
json.dump({"train_time": train_runtime, "eval_accuracy": eval_accuracy, "eval_loss": eval_loss} , UpperCAmelCase_ )
| 37 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCAmelCase__ : Dict = logging.get_logger(__name__)
lowerCAmelCase__ : int = {
"google/fnet-base": "https://huggingface.co/google/fnet-base/resolve/main/config.json",
"google/fnet-large": "https://huggingface.co/google/fnet-large/resolve/main/config.json"
# See all FNet models at https://huggingface.co/models?filter=fnet
}
class SCREAMING_SNAKE_CASE__ ( snake_case__ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = '''fnet'''
def __init__( self : Tuple , UpperCAmelCase_ : str=32_000 , UpperCAmelCase_ : List[str]=768 , UpperCAmelCase_ : List[str]=12 , UpperCAmelCase_ : str=3_072 , UpperCAmelCase_ : List[str]="gelu_new" , UpperCAmelCase_ : str=0.1 , UpperCAmelCase_ : Optional[Any]=512 , UpperCAmelCase_ : Any=4 , UpperCAmelCase_ : str=0.02 , UpperCAmelCase_ : List[Any]=1e-12 , UpperCAmelCase_ : int=False , UpperCAmelCase_ : Tuple=512 , UpperCAmelCase_ : int=3 , UpperCAmelCase_ : Dict=1 , UpperCAmelCase_ : List[Any]=2 , **UpperCAmelCase_ : Tuple , ):
"""simple docstring"""
super().__init__(pad_token_id=UpperCAmelCase_ , bos_token_id=UpperCAmelCase_ , eos_token_id=UpperCAmelCase_ , **UpperCAmelCase_ )
__UpperCAmelCase : Optional[Any] = vocab_size
__UpperCAmelCase : List[str] = max_position_embeddings
__UpperCAmelCase : List[Any] = hidden_size
__UpperCAmelCase : Optional[int] = num_hidden_layers
__UpperCAmelCase : Optional[int] = intermediate_size
__UpperCAmelCase : List[str] = hidden_act
__UpperCAmelCase : List[str] = hidden_dropout_prob
__UpperCAmelCase : Tuple = initializer_range
__UpperCAmelCase : Union[str, Any] = type_vocab_size
__UpperCAmelCase : List[Any] = layer_norm_eps
__UpperCAmelCase : Optional[Any] = use_tpu_fourier_optimizations
__UpperCAmelCase : List[Any] = tpu_short_seq_length
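# Illustrative usage (not in the original file; assumes a transformers build
# that ships FNet):
#
#   from transformers import FNetConfig, FNetModel
#   configuration = FNetConfig()       # google/fnet-base style defaults
#   model = FNetModel(configuration)   # randomly initialized model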
| 37 | 1 |
"""simple docstring"""
def is_arithmetic_series(series: list) -> bool:
    if not isinstance(series, list ):
        raise ValueError("""Input series is not valid, valid series - [2, 4, 6]""" )
    if len(series ) == 0:
        raise ValueError("""Input list must be a non empty list""" )
    if len(series ) == 1:
        return True
    common_diff = series[1] - series[0]
    for index in range(len(series ) - 1 ):
        if series[index + 1] - series[index] != common_diff:
            return False
    return True


def arithmetic_mean(series: list) -> float:
    if not isinstance(series, list ):
        raise ValueError("""Input series is not valid, valid series - [2, 4, 6]""" )
    if len(series ) == 0:
        raise ValueError("""Input list must be a non empty list""" )
    answer = 0
    for val in series:
        answer += val
    return answer / len(series )
if __name__ == "__main__":
import doctest
doctest.testmod()
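    # Illustrative checks (not in the original file):
    assert is_arithmetic_series([2, 4, 6])
    assert not is_arithmetic_series([2, 4, 7])
    assert arithmetic_mean([2, 4, 6]) == 4.0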
| 100 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {
"configuration_mask2former": [
"MASK2FORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
"Mask2FormerConfig",
],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__a = ["Mask2FormerImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_mask2former"] = [
"MASK2FORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
"Mask2FormerForUniversalSegmentation",
"Mask2FormerModel",
"Mask2FormerPreTrainedModel",
]
if TYPE_CHECKING:
    from .configuration_mask2former import MASK2FORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, Mask2FormerConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .image_processing_mask2former import Mask2FormerImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_mask2former import (
            MASK2FORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            Mask2FormerForUniversalSegmentation,
            Mask2FormerModel,
            Mask2FormerPreTrainedModel,
        )
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
| 66 | 0 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
lowerCamelCase_ : Dict = {"""configuration_swin""": ["""SWIN_PRETRAINED_CONFIG_ARCHIVE_MAP""", """SwinConfig""", """SwinOnnxConfig"""]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_swin"] = [
"""SWIN_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""SwinForImageClassification""",
"""SwinForMaskedImageModeling""",
"""SwinModel""",
"""SwinPreTrainedModel""",
"""SwinBackbone""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_swin"] = [
"""TF_SWIN_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFSwinForImageClassification""",
"""TFSwinForMaskedImageModeling""",
"""TFSwinModel""",
"""TFSwinPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_swin import SWIN_PRETRAINED_CONFIG_ARCHIVE_MAP, SwinConfig, SwinOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_swin import (
SWIN_PRETRAINED_MODEL_ARCHIVE_LIST,
SwinBackbone,
SwinForImageClassification,
SwinForMaskedImageModeling,
SwinModel,
SwinPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_swin import (
TF_SWIN_PRETRAINED_MODEL_ARCHIVE_LIST,
TFSwinForImageClassification,
TFSwinForMaskedImageModeling,
TFSwinModel,
TFSwinPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 358 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
_import_structure = {
"""configuration_roberta_prelayernorm""": [
"""ROBERTA_PRELAYERNORM_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""RobertaPreLayerNormConfig""",
"""RobertaPreLayerNormOnnxConfig""",
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_roberta_prelayernorm"] = [
"""ROBERTA_PRELAYERNORM_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""RobertaPreLayerNormForCausalLM""",
"""RobertaPreLayerNormForMaskedLM""",
"""RobertaPreLayerNormForMultipleChoice""",
"""RobertaPreLayerNormForQuestionAnswering""",
"""RobertaPreLayerNormForSequenceClassification""",
"""RobertaPreLayerNormForTokenClassification""",
"""RobertaPreLayerNormModel""",
"""RobertaPreLayerNormPreTrainedModel""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_roberta_prelayernorm"] = [
"""TF_ROBERTA_PRELAYERNORM_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFRobertaPreLayerNormForCausalLM""",
"""TFRobertaPreLayerNormForMaskedLM""",
"""TFRobertaPreLayerNormForMultipleChoice""",
"""TFRobertaPreLayerNormForQuestionAnswering""",
"""TFRobertaPreLayerNormForSequenceClassification""",
"""TFRobertaPreLayerNormForTokenClassification""",
"""TFRobertaPreLayerNormMainLayer""",
"""TFRobertaPreLayerNormModel""",
"""TFRobertaPreLayerNormPreTrainedModel""",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_roberta_prelayernorm"] = [
"""FlaxRobertaPreLayerNormForCausalLM""",
"""FlaxRobertaPreLayerNormForMaskedLM""",
"""FlaxRobertaPreLayerNormForMultipleChoice""",
"""FlaxRobertaPreLayerNormForQuestionAnswering""",
"""FlaxRobertaPreLayerNormForSequenceClassification""",
"""FlaxRobertaPreLayerNormForTokenClassification""",
"""FlaxRobertaPreLayerNormModel""",
"""FlaxRobertaPreLayerNormPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_roberta_prelayernorm import (
ROBERTA_PRELAYERNORM_PRETRAINED_CONFIG_ARCHIVE_MAP,
RobertaPreLayerNormConfig,
RobertaPreLayerNormOnnxConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_roberta_prelayernorm import (
ROBERTA_PRELAYERNORM_PRETRAINED_MODEL_ARCHIVE_LIST,
RobertaPreLayerNormForCausalLM,
RobertaPreLayerNormForMaskedLM,
RobertaPreLayerNormForMultipleChoice,
RobertaPreLayerNormForQuestionAnswering,
RobertaPreLayerNormForSequenceClassification,
RobertaPreLayerNormForTokenClassification,
RobertaPreLayerNormModel,
RobertaPreLayerNormPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_roberta_prelayernorm import (
TF_ROBERTA_PRELAYERNORM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFRobertaPreLayerNormForCausalLM,
TFRobertaPreLayerNormForMaskedLM,
TFRobertaPreLayerNormForMultipleChoice,
TFRobertaPreLayerNormForQuestionAnswering,
TFRobertaPreLayerNormForSequenceClassification,
TFRobertaPreLayerNormForTokenClassification,
TFRobertaPreLayerNormMainLayer,
TFRobertaPreLayerNormModel,
TFRobertaPreLayerNormPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_roberta_prelayernorm import (
FlaxRobertaPreLayerNormForCausalLM,
FlaxRobertaPreLayerNormForMaskedLM,
FlaxRobertaPreLayerNormForMultipleChoice,
FlaxRobertaPreLayerNormForQuestionAnswering,
FlaxRobertaPreLayerNormForSequenceClassification,
FlaxRobertaPreLayerNormForTokenClassification,
FlaxRobertaPreLayerNormModel,
FlaxRobertaPreLayerNormPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 197 | 0 |
'''simple docstring'''
from __future__ import annotations
def extended_euclid(a: int, b: int) -> tuple[int, int]:
    """simple docstring"""
    if b == 0:
        return (1, 0)
    (x, y) = extended_euclid(b, a % b)
    k = a // b
    return (y, x - k * y)


def chinese_remainder_theorem(n1: int, r1: int, n2: int, r2: int) -> int:
    """simple docstring"""
    (x, y) = extended_euclid(n1, n2)
    m = n1 * n2
    n = r2 * x * n1 + r1 * y * n2
    return (n % m + m) % m


def invert_modulo(a: int, n: int) -> int:
    """simple docstring"""
    (b, x) = extended_euclid(a, n)
    if b < 0:
        b = (b % n + n) % n
    return b


def chinese_remainder_theorem2(n1: int, r1: int, n2: int, r2: int) -> int:
    """simple docstring"""
    x, y = invert_modulo(n1, n2), invert_modulo(n2, n1)
    m = n1 * n2
    n = r2 * x * n1 + r1 * y * n2
    return (n % m + m) % m
if __name__ == "__main__":
from doctest import testmod
testmod(name="chinese_remainder_theorem", verbose=True)
testmod(name="chinese_remainder_theorem2", verbose=True)
testmod(name="invert_modulo", verbose=True)
testmod(name="extended_euclid", verbose=True)
| 161 |
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import PoolFormerImageProcessor
class __a ( unittest.TestCase ):
def __init__( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=7 , _SCREAMING_SNAKE_CASE=3 , _SCREAMING_SNAKE_CASE=30 , _SCREAMING_SNAKE_CASE=400 , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=0.9 , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=[0.5, 0.5, 0.5] , _SCREAMING_SNAKE_CASE=[0.5, 0.5, 0.5] , ) -> str:
"""simple docstring"""
_UpperCAmelCase = size if size is not None else {'shortest_edge': 30}
_UpperCAmelCase = crop_size if crop_size is not None else {'height': 30, 'width': 30}
_UpperCAmelCase = parent
_UpperCAmelCase = batch_size
_UpperCAmelCase = num_channels
_UpperCAmelCase = min_resolution
_UpperCAmelCase = max_resolution
_UpperCAmelCase = do_resize_and_center_crop
_UpperCAmelCase = size
_UpperCAmelCase = crop_pct
_UpperCAmelCase = crop_size
_UpperCAmelCase = do_normalize
_UpperCAmelCase = image_mean
_UpperCAmelCase = image_std
def UpperCAmelCase__ ( self ) -> int:
"""simple docstring"""
return {
"size": self.size,
"do_resize_and_center_crop": self.do_resize_and_center_crop,
"crop_pct": self.crop_pct,
"crop_size": self.crop_size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
}
@require_torch
@require_vision
class __a ( UpperCAmelCase , unittest.TestCase ):
_a : Optional[Any] = PoolFormerImageProcessor if is_vision_available() else None
def UpperCAmelCase__ ( self ) -> Any:
"""simple docstring"""
_UpperCAmelCase = PoolFormerImageProcessingTester(self )
@property
def UpperCAmelCase__ ( self ) -> Optional[Any]:
"""simple docstring"""
return self.image_processor_tester.prepare_image_processor_dict()
def UpperCAmelCase__ ( self ) -> Optional[Any]:
"""simple docstring"""
_UpperCAmelCase = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(_SCREAMING_SNAKE_CASE , 'do_resize_and_center_crop' ) )
self.assertTrue(hasattr(_SCREAMING_SNAKE_CASE , 'size' ) )
self.assertTrue(hasattr(_SCREAMING_SNAKE_CASE , 'crop_pct' ) )
self.assertTrue(hasattr(_SCREAMING_SNAKE_CASE , 'do_normalize' ) )
self.assertTrue(hasattr(_SCREAMING_SNAKE_CASE , 'image_mean' ) )
self.assertTrue(hasattr(_SCREAMING_SNAKE_CASE , 'image_std' ) )
def UpperCAmelCase__ ( self ) -> Tuple:
"""simple docstring"""
_UpperCAmelCase = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {'shortest_edge': 30} )
self.assertEqual(image_processor.crop_size , {'height': 30, 'width': 30} )
_UpperCAmelCase = self.image_processing_class.from_dict(self.image_processor_dict , size=42 , crop_size=84 )
self.assertEqual(image_processor.size , {'shortest_edge': 42} )
self.assertEqual(image_processor.crop_size , {'height': 84, 'width': 84} )
def UpperCAmelCase__ ( self ) -> Any:
"""simple docstring"""
pass
def UpperCAmelCase__ ( self ) -> int:
"""simple docstring"""
_UpperCAmelCase = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
_UpperCAmelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=_SCREAMING_SNAKE_CASE )
for image in image_inputs:
self.assertIsInstance(_SCREAMING_SNAKE_CASE , Image.Image )
# Test not batched input
_UpperCAmelCase = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) , )
# Test batched
_UpperCAmelCase = image_processing(_SCREAMING_SNAKE_CASE , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) , )
def UpperCAmelCase__ ( self ) -> str:
"""simple docstring"""
_UpperCAmelCase = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
_UpperCAmelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=_SCREAMING_SNAKE_CASE , numpify=_SCREAMING_SNAKE_CASE )
for image in image_inputs:
self.assertIsInstance(_SCREAMING_SNAKE_CASE , np.ndarray )
# Test not batched input
_UpperCAmelCase = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) , )
# Test batched
_UpperCAmelCase = image_processing(_SCREAMING_SNAKE_CASE , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) , )
def UpperCAmelCase__ ( self ) -> List[Any]:
"""simple docstring"""
_UpperCAmelCase = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
_UpperCAmelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=_SCREAMING_SNAKE_CASE , torchify=_SCREAMING_SNAKE_CASE )
for image in image_inputs:
self.assertIsInstance(_SCREAMING_SNAKE_CASE , torch.Tensor )
# Test not batched input
_UpperCAmelCase = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) , )
# Test batched
_UpperCAmelCase = image_processing(_SCREAMING_SNAKE_CASE , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) , )
| 329 | 0 |
import base64


def base85_encode(string: str) -> bytes:
    # str -> utf-8 bytes -> Ascii85-encoded bytes
    return base64.a85encode(string.encode('utf-8' ) )


def base85_decode(a85encoded: bytes) -> str:
    # Ascii85-encoded bytes -> raw bytes -> utf-8 str
    return base64.a85decode(a85encoded ).decode('utf-8' )
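# Round-trip property (a note, not in the original file): for any str s,
# base85_decode(base85_encode(s)) == s, since Ascii85 encoding is lossless.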
if __name__ == "__main__":
import doctest
doctest.testmod()
| 363 |
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
__lowerCAmelCase = logging.get_logger(__name__)
__lowerCAmelCase = {
'''distilbert-base-uncased''': '''https://huggingface.co/distilbert-base-uncased/resolve/main/config.json''',
'''distilbert-base-uncased-distilled-squad''': (
'''https://huggingface.co/distilbert-base-uncased-distilled-squad/resolve/main/config.json'''
),
'''distilbert-base-cased''': '''https://huggingface.co/distilbert-base-cased/resolve/main/config.json''',
'''distilbert-base-cased-distilled-squad''': (
'''https://huggingface.co/distilbert-base-cased-distilled-squad/resolve/main/config.json'''
),
'''distilbert-base-german-cased''': '''https://huggingface.co/distilbert-base-german-cased/resolve/main/config.json''',
'''distilbert-base-multilingual-cased''': (
'''https://huggingface.co/distilbert-base-multilingual-cased/resolve/main/config.json'''
),
'''distilbert-base-uncased-finetuned-sst-2-english''': (
'''https://huggingface.co/distilbert-base-uncased-finetuned-sst-2-english/resolve/main/config.json'''
),
}
class __magic_name__ ( _UpperCamelCase ):
lowerCAmelCase : Optional[int] = 'distilbert'
lowerCAmelCase : Optional[int] = {
'hidden_size': 'dim',
'num_attention_heads': 'n_heads',
'num_hidden_layers': 'n_layers',
}
def __init__( self : str ,_UpperCAmelCase : Optional[Any]=30522 ,_UpperCAmelCase : int=512 ,_UpperCAmelCase : int=False ,_UpperCAmelCase : str=6 ,_UpperCAmelCase : int=12 ,_UpperCAmelCase : Union[str, Any]=768 ,_UpperCAmelCase : Optional[Any]=4 * 768 ,_UpperCAmelCase : int=0.1 ,_UpperCAmelCase : Union[str, Any]=0.1 ,_UpperCAmelCase : Tuple="gelu" ,_UpperCAmelCase : Any=0.02 ,_UpperCAmelCase : List[Any]=0.1 ,_UpperCAmelCase : Dict=0.2 ,_UpperCAmelCase : Union[str, Any]=0 ,**_UpperCAmelCase : List[Any] ,):
_a : Optional[Any] = vocab_size
_a : Dict = max_position_embeddings
_a : Optional[int] = sinusoidal_pos_embds
_a : List[Any] = n_layers
_a : Any = n_heads
_a : int = dim
_a : Optional[Any] = hidden_dim
_a : str = dropout
_a : Optional[Any] = attention_dropout
_a : Any = activation
_a : Tuple = initializer_range
_a : Union[str, Any] = qa_dropout
_a : List[Any] = seq_classif_dropout
super().__init__(**_UpperCAmelCase ,pad_token_id=_UpperCAmelCase )
class __magic_name__ ( _UpperCamelCase ):
@property
def __lowercase ( self : Union[str, Any] ):
if self.task == "multiple-choice":
_a : Union[str, Any] = {0: 'batch', 1: 'choice', 2: 'sequence'}
else:
_a : List[str] = {0: 'batch', 1: 'sequence'}
return OrderedDict(
[
('input_ids', dynamic_axis),
('attention_mask', dynamic_axis),
] )
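# Note (not in the original file): the mapping above declares the dynamic axes
# for ONNX export; {0: 'batch', 1: 'sequence'} marks dimensions 0 and 1 of each
# input as variable-sized, so one exported graph serves any batch size and
# sequence length (with an extra 'choice' axis in the multiple-choice case).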
| 107 | 0 |
from __future__ import annotations
import unittest
from transformers import is_tf_available, is_torch_available
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, SMALL_MODEL_IDENTIFIER, is_pt_tf_cross_test, slow
if is_tf_available():
from transformers import (
AutoConfig,
BertConfig,
GPTaConfig,
TaConfig,
TFAutoModel,
TFAutoModelForCausalLM,
TFAutoModelForMaskedLM,
TFAutoModelForPreTraining,
TFAutoModelForQuestionAnswering,
TFAutoModelForSeqaSeqLM,
TFAutoModelForSequenceClassification,
TFAutoModelWithLMHead,
TFBertForMaskedLM,
TFBertForPreTraining,
TFBertForQuestionAnswering,
TFBertForSequenceClassification,
TFBertModel,
TFGPTaLMHeadModel,
TFRobertaForMaskedLM,
TFTaForConditionalGeneration,
)
from transformers.models.bert.modeling_tf_bert import TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST
from transformers.models.gpta.modeling_tf_gpta import TF_GPT2_PRETRAINED_MODEL_ARCHIVE_LIST
from transformers.models.ta.modeling_tf_ta import TF_T5_PRETRAINED_MODEL_ARCHIVE_LIST
if is_torch_available():
from transformers import (
AutoModel,
AutoModelForCausalLM,
AutoModelForMaskedLM,
AutoModelForPreTraining,
AutoModelForQuestionAnswering,
AutoModelForSeqaSeqLM,
AutoModelForSequenceClassification,
AutoModelWithLMHead,
BertForMaskedLM,
BertForPreTraining,
BertForQuestionAnswering,
BertForSequenceClassification,
BertModel,
GPTaLMHeadModel,
RobertaForMaskedLM,
TaForConditionalGeneration,
)
@is_pt_tf_cross_test
class _lowercase ( unittest.TestCase ):
'''simple docstring'''
@slow
def __magic_name__( self :int ) -> Union[str, Any]:
# for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
for model_name in ["bert-base-uncased"]:
__SCREAMING_SNAKE_CASE : Dict = AutoConfig.from_pretrained(lowerCAmelCase__ )
self.assertIsNotNone(lowerCAmelCase__ )
self.assertIsInstance(lowerCAmelCase__ , lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : List[str] = TFAutoModel.from_pretrained(lowerCAmelCase__ , from_pt=lowerCAmelCase__ )
self.assertIsNotNone(lowerCAmelCase__ )
self.assertIsInstance(lowerCAmelCase__ , lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : Dict = AutoModel.from_pretrained(lowerCAmelCase__ , from_tf=lowerCAmelCase__ )
self.assertIsNotNone(lowerCAmelCase__ )
self.assertIsInstance(lowerCAmelCase__ , lowerCAmelCase__ )
@slow
def __magic_name__( self :int ) -> Union[str, Any]:
# for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
for model_name in ["bert-base-uncased"]:
__SCREAMING_SNAKE_CASE : int = AutoConfig.from_pretrained(lowerCAmelCase__ )
self.assertIsNotNone(lowerCAmelCase__ )
self.assertIsInstance(lowerCAmelCase__ , lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : Any = TFAutoModelForPreTraining.from_pretrained(lowerCAmelCase__ , from_pt=lowerCAmelCase__ )
self.assertIsNotNone(lowerCAmelCase__ )
self.assertIsInstance(lowerCAmelCase__ , lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : int = AutoModelForPreTraining.from_pretrained(lowerCAmelCase__ , from_tf=lowerCAmelCase__ )
self.assertIsNotNone(lowerCAmelCase__ )
self.assertIsInstance(lowerCAmelCase__ , lowerCAmelCase__ )
@slow
def __magic_name__( self :int ) -> List[Any]:
for model_name in TF_GPT2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__SCREAMING_SNAKE_CASE : Tuple = AutoConfig.from_pretrained(lowerCAmelCase__ )
self.assertIsNotNone(lowerCAmelCase__ )
self.assertIsInstance(lowerCAmelCase__ , lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : List[str] = TFAutoModelForCausalLM.from_pretrained(lowerCAmelCase__ , from_pt=lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE : List[str] = TFAutoModelForCausalLM.from_pretrained(
lowerCAmelCase__ , output_loading_info=lowerCAmelCase__ , from_pt=lowerCAmelCase__ )
self.assertIsNotNone(lowerCAmelCase__ )
self.assertIsInstance(lowerCAmelCase__ , lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : Dict = AutoModelForCausalLM.from_pretrained(lowerCAmelCase__ , from_tf=lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE : List[str] = AutoModelForCausalLM.from_pretrained(
lowerCAmelCase__ , output_loading_info=lowerCAmelCase__ , from_tf=lowerCAmelCase__ )
self.assertIsNotNone(lowerCAmelCase__ )
self.assertIsInstance(lowerCAmelCase__ , lowerCAmelCase__ )
@slow
def __magic_name__( self :Optional[int] ) -> Dict:
for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__SCREAMING_SNAKE_CASE : Union[str, Any] = AutoConfig.from_pretrained(lowerCAmelCase__ )
self.assertIsNotNone(lowerCAmelCase__ )
self.assertIsInstance(lowerCAmelCase__ , lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : Any = TFAutoModelWithLMHead.from_pretrained(lowerCAmelCase__ , from_pt=lowerCAmelCase__ )
self.assertIsNotNone(lowerCAmelCase__ )
self.assertIsInstance(lowerCAmelCase__ , lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : int = AutoModelWithLMHead.from_pretrained(lowerCAmelCase__ , from_tf=lowerCAmelCase__ )
self.assertIsNotNone(lowerCAmelCase__ )
self.assertIsInstance(lowerCAmelCase__ , lowerCAmelCase__ )
@slow
def __magic_name__( self :List[str] ) -> Tuple:
for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__SCREAMING_SNAKE_CASE : Optional[Any] = AutoConfig.from_pretrained(lowerCAmelCase__ )
self.assertIsNotNone(lowerCAmelCase__ )
self.assertIsInstance(lowerCAmelCase__ , lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : Optional[Any] = TFAutoModelForMaskedLM.from_pretrained(lowerCAmelCase__ , from_pt=lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE : Dict = TFAutoModelForMaskedLM.from_pretrained(
lowerCAmelCase__ , output_loading_info=lowerCAmelCase__ , from_pt=lowerCAmelCase__ )
self.assertIsNotNone(lowerCAmelCase__ )
self.assertIsInstance(lowerCAmelCase__ , lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : str = AutoModelForMaskedLM.from_pretrained(lowerCAmelCase__ , from_tf=lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE : Union[str, Any] = AutoModelForMaskedLM.from_pretrained(
lowerCAmelCase__ , output_loading_info=lowerCAmelCase__ , from_tf=lowerCAmelCase__ )
self.assertIsNotNone(lowerCAmelCase__ )
self.assertIsInstance(lowerCAmelCase__ , lowerCAmelCase__ )
@slow
def __magic_name__( self :int ) -> List[str]:
for model_name in TF_T5_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__SCREAMING_SNAKE_CASE : str = AutoConfig.from_pretrained(lowerCAmelCase__ )
self.assertIsNotNone(lowerCAmelCase__ )
self.assertIsInstance(lowerCAmelCase__ , lowerCAmelCase__ )
            __SCREAMING_SNAKE_CASE : Any = TFAutoModelForSeq2SeqLM.from_pretrained(lowerCAmelCase__ , from_pt=lowerCAmelCase__ )
            __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE : Optional[Any] = TFAutoModelForSeq2SeqLM.from_pretrained(
                lowerCAmelCase__ , output_loading_info=lowerCAmelCase__ , from_pt=lowerCAmelCase__ )
            self.assertIsNotNone(lowerCAmelCase__ )
            self.assertIsInstance(lowerCAmelCase__ , lowerCAmelCase__ )
            __SCREAMING_SNAKE_CASE : Union[str, Any] = AutoModelForSeq2SeqLM.from_pretrained(lowerCAmelCase__ , from_tf=lowerCAmelCase__ )
            __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE : int = AutoModelForSeq2SeqLM.from_pretrained(
                lowerCAmelCase__ , output_loading_info=lowerCAmelCase__ , from_tf=lowerCAmelCase__ )
self.assertIsNotNone(lowerCAmelCase__ )
self.assertIsInstance(lowerCAmelCase__ , lowerCAmelCase__ )
@slow
def __magic_name__( self :Any ) -> Union[str, Any]:
# for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
for model_name in ["bert-base-uncased"]:
__SCREAMING_SNAKE_CASE : str = AutoConfig.from_pretrained(lowerCAmelCase__ )
self.assertIsNotNone(lowerCAmelCase__ )
self.assertIsInstance(lowerCAmelCase__ , lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : Dict = TFAutoModelForSequenceClassification.from_pretrained(lowerCAmelCase__ , from_pt=lowerCAmelCase__ )
self.assertIsNotNone(lowerCAmelCase__ )
self.assertIsInstance(lowerCAmelCase__ , lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : str = AutoModelForSequenceClassification.from_pretrained(lowerCAmelCase__ , from_tf=lowerCAmelCase__ )
self.assertIsNotNone(lowerCAmelCase__ )
self.assertIsInstance(lowerCAmelCase__ , lowerCAmelCase__ )
@slow
def __magic_name__( self :Tuple ) -> List[str]:
# for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
for model_name in ["bert-base-uncased"]:
__SCREAMING_SNAKE_CASE : Dict = AutoConfig.from_pretrained(lowerCAmelCase__ )
self.assertIsNotNone(lowerCAmelCase__ )
self.assertIsInstance(lowerCAmelCase__ , lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : Tuple = TFAutoModelForQuestionAnswering.from_pretrained(lowerCAmelCase__ , from_pt=lowerCAmelCase__ )
self.assertIsNotNone(lowerCAmelCase__ )
self.assertIsInstance(lowerCAmelCase__ , lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : List[str] = AutoModelForQuestionAnswering.from_pretrained(lowerCAmelCase__ , from_tf=lowerCAmelCase__ )
self.assertIsNotNone(lowerCAmelCase__ )
self.assertIsInstance(lowerCAmelCase__ , lowerCAmelCase__ )
    def test_from_pretrained_identifier( self ):
        model = TFAutoModelWithLMHead.from_pretrained(SMALL_MODEL_IDENTIFIER , from_pt=True )
        self.assertIsInstance(model , TFBertForMaskedLM )
        self.assertEqual(model.num_parameters() , 14_410 )
        self.assertEqual(model.num_parameters(only_trainable=True ) , 14_410 )
        model = AutoModelWithLMHead.from_pretrained(SMALL_MODEL_IDENTIFIER , from_tf=True )
        self.assertIsInstance(model , BertForMaskedLM )
        self.assertEqual(model.num_parameters() , 14_410 )
        self.assertEqual(model.num_parameters(only_trainable=True ) , 14_410 )
    def test_from_identifier_from_model_type( self ):
        model = TFAutoModelWithLMHead.from_pretrained(DUMMY_UNKNOWN_IDENTIFIER , from_pt=True )
        self.assertIsInstance(model , TFRobertaForMaskedLM )
        self.assertEqual(model.num_parameters() , 14_410 )
        self.assertEqual(model.num_parameters(only_trainable=True ) , 14_410 )
        model = AutoModelWithLMHead.from_pretrained(DUMMY_UNKNOWN_IDENTIFIER , from_tf=True )
        self.assertIsInstance(model , RobertaForMaskedLM )
        self.assertEqual(model.num_parameters() , 14_410 )
        self.assertEqual(model.num_parameters(only_trainable=True ) , 14_410 )
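# Hedged usage sketch (added for illustration, not part of the original test
# file): the cross-framework loading pattern exercised above -- a PyTorch
# checkpoint loaded into a TF class with `from_pt=True`, and the reverse with
# `from_tf=True`. SMALL_MODEL_IDENTIFIER is the tiny checkpoint the tests use.
def _example_cross_framework_loading():
    tf_model = TFAutoModelWithLMHead.from_pretrained(SMALL_MODEL_IDENTIFIER , from_pt=True )
    pt_model = AutoModelWithLMHead.from_pretrained(SMALL_MODEL_IDENTIFIER , from_tf=True )
    return tf_model, pt_model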
| 9 |
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
from transformers import BertTokenizerFast
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES, BertTokenizer
from transformers.testing_utils import require_tokenizers, require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import VisionTextDualEncoderProcessor, ViTImageProcessor
@require_tokenizers
@require_vision
class lowercase_ ( unittest.TestCase ):
'''simple docstring'''
    def setUp( self ):
        self.tmpdirname = tempfile.mkdtemp()
        # fmt: off
        vocab_tokens = ['''[UNK]''', '''[CLS]''', '''[SEP]''', '''[PAD]''', '''[MASK]''', '''want''', '''##want''', '''##ed''', '''wa''', '''un''', '''runn''', '''##ing''', ''',''', '''low''', '''lowest''']
        # fmt: on
        self.vocab_file = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
        with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as vocab_writer:
            vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens] ) )
        image_processor_map = {
            '''do_resize''': True,
            '''size''': {'''height''': 18, '''width''': 18},
            '''do_normalize''': True,
            '''image_mean''': [0.5, 0.5, 0.5],
            '''image_std''': [0.5, 0.5, 0.5],
        }
        self.image_processor_file = os.path.join(self.tmpdirname , IMAGE_PROCESSOR_NAME )
        with open(self.image_processor_file , '''w''' , encoding='''utf-8''' ) as fp:
            json.dump(image_processor_map , fp )
    def get_tokenizer( self , **kwargs ):
        return BertTokenizer.from_pretrained(self.tmpdirname , **kwargs )

    def get_image_processor( self , **kwargs ):
        return ViTImageProcessor.from_pretrained(self.tmpdirname , **kwargs )

    def tearDown( self ):
        shutil.rmtree(self.tmpdirname )

    def prepare_image_inputs( self ):
        image_inputs = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uint8 )]
        image_inputs = [Image.fromarray(np.moveaxis(x , 0 , -1 ) ) for x in image_inputs]
        return image_inputs
    def test_save_load_pretrained_default( self ):
        tokenizer = self.get_tokenizer()
        image_processor = self.get_image_processor()
        processor = VisionTextDualEncoderProcessor(tokenizer=tokenizer , image_processor=image_processor )
        processor.save_pretrained(self.tmpdirname )
        processor = VisionTextDualEncoderProcessor.from_pretrained(self.tmpdirname )
        self.assertEqual(processor.tokenizer.get_vocab() , tokenizer.get_vocab() )
        self.assertIsInstance(processor.tokenizer , (BertTokenizer, BertTokenizerFast) )
        self.assertEqual(processor.image_processor.to_json_string() , image_processor.to_json_string() )
        self.assertIsInstance(processor.image_processor , ViTImageProcessor )
def __lowerCAmelCase ( self : int ) ->Dict:
"""simple docstring"""
a = VisionTextDualEncoderProcessor(
tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
a = self.get_tokenizer(bos_token='''(BOS)''' , eos_token='''(EOS)''' )
a = self.get_image_processor(do_normalize=__UpperCAmelCase , padding_value=1.0 )
a = VisionTextDualEncoderProcessor.from_pretrained(
self.tmpdirname , bos_token='''(BOS)''' , eos_token='''(EOS)''' , do_normalize=__UpperCAmelCase , padding_value=1.0 )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer , (BertTokenizer, BertTokenizerFast) )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , __UpperCAmelCase )
def __lowerCAmelCase ( self : Tuple ) ->Dict:
"""simple docstring"""
a = self.get_image_processor()
a = self.get_tokenizer()
a = VisionTextDualEncoderProcessor(tokenizer=__UpperCAmelCase , image_processor=__UpperCAmelCase )
a = self.prepare_image_inputs()
a = image_processor(__UpperCAmelCase , return_tensors='''np''' )
a = processor(images=__UpperCAmelCase , return_tensors='''np''' )
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1e-2 )
def __lowerCAmelCase ( self : List[str] ) ->str:
"""simple docstring"""
a = self.get_image_processor()
a = self.get_tokenizer()
a = VisionTextDualEncoderProcessor(tokenizer=__UpperCAmelCase , image_processor=__UpperCAmelCase )
a = '''lower newer'''
a = processor(text=__UpperCAmelCase )
a = tokenizer(__UpperCAmelCase )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
def __lowerCAmelCase ( self : List[Any] ) ->List[str]:
"""simple docstring"""
a = self.get_image_processor()
a = self.get_tokenizer()
a = VisionTextDualEncoderProcessor(tokenizer=__UpperCAmelCase , image_processor=__UpperCAmelCase )
a = '''lower newer'''
a = self.prepare_image_inputs()
a = processor(text=__UpperCAmelCase , images=__UpperCAmelCase )
self.assertListEqual(list(inputs.keys() ) , ['''input_ids''', '''token_type_ids''', '''attention_mask''', '''pixel_values'''] )
# test if it raises when no input is passed
with self.assertRaises(__UpperCAmelCase ):
processor()
def __lowerCAmelCase ( self : Optional[int] ) ->List[str]:
"""simple docstring"""
a = self.get_image_processor()
a = self.get_tokenizer()
a = VisionTextDualEncoderProcessor(tokenizer=__UpperCAmelCase , image_processor=__UpperCAmelCase )
a = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
a = processor.batch_decode(__UpperCAmelCase )
a = tokenizer.batch_decode(__UpperCAmelCase )
self.assertListEqual(__UpperCAmelCase , __UpperCAmelCase )
def __lowerCAmelCase ( self : Optional[Any] ) ->Dict:
"""simple docstring"""
a = self.get_image_processor()
a = self.get_tokenizer()
a = VisionTextDualEncoderProcessor(tokenizer=__UpperCAmelCase , image_processor=__UpperCAmelCase )
a = '''lower newer'''
a = self.prepare_image_inputs()
a = processor(text=__UpperCAmelCase , images=__UpperCAmelCase )
self.assertListEqual(list(inputs.keys() ) , processor.model_input_names )
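# Hedged sketch (not in the original tests): typical end-to-end use of the
# processor class under test -- one call tokenizes the text and preprocesses the
# image into a single batch dict. The BERT checkpoint name is an assumption.
def _example_processor_usage():
    tokenizer = BertTokenizerFast.from_pretrained('''bert-base-uncased''' )  # assumed checkpoint
    image_processor = ViTImageProcessor()
    processor = VisionTextDualEncoderProcessor(tokenizer=tokenizer , image_processor=image_processor )
    image = Image.fromarray(np.zeros((30, 30, 3) , dtype=np.uint8 ) )
    batch = processor(text=['''a photo'''] , images=[image] , return_tensors='''np''' )
    return batch  # contains input_ids, token_type_ids, attention_mask, pixel_values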
| 0 | 0 |
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

LEVIT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    'facebook/levit-128S': 'https://huggingface.co/facebook/levit-128S/resolve/main/config.json',
    # See all LeViT models at https://huggingface.co/models?filter=levit
}


class LevitConfig(PretrainedConfig):
    model_type = 'levit'

    def __init__(self, image_size=224, num_channels=3, kernel_size=3, stride=2, padding=1, patch_size=16, hidden_sizes=[128, 256, 384], num_attention_heads=[4, 8, 12], depths=[4, 4, 4], key_dim=[16, 16, 16], drop_path_rate=0, mlp_ratio=[2, 2, 2], attention_ratio=[2, 2, 2], initializer_range=0.02, **kwargs):
        super().__init__(**kwargs)
        self.image_size = image_size
        self.num_channels = num_channels
        self.kernel_size = kernel_size
        self.stride = stride
        self.padding = padding
        self.hidden_sizes = hidden_sizes
        self.num_attention_heads = num_attention_heads
        self.depths = depths
        self.key_dim = key_dim
        self.drop_path_rate = drop_path_rate
        self.patch_size = patch_size
        self.attention_ratio = attention_ratio
        self.mlp_ratio = mlp_ratio
        self.initializer_range = initializer_range
        self.down_ops = [
            ['Subsample', key_dim[0], hidden_sizes[0] // key_dim[0], 4, 2, 2],
            ['Subsample', key_dim[0], hidden_sizes[1] // key_dim[0], 4, 2, 2],
        ]


class LevitOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse('1.11')

    @property
    def inputs(self):
        return OrderedDict(
            [
                ('pixel_values', {0: 'batch', 1: 'num_channels', 2: 'height', 3: 'width'}),
            ] )

    @property
    def atol_for_validation(self):
        return 1e-4
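# Hedged sketch (not in the original file): constructing the config above with
# its defaults and reading derived fields; nothing checkpoint-specific here.
def _example_levit_config_usage():
    config = LevitConfig()
    print(config.model_type )      # "levit"
    print(config.hidden_sizes )    # [128, 256, 384]
    print(config.down_ops[0][0] )  # "Subsample"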
| 353 |
"""simple docstring"""
import gc
import random
import unittest
import numpy as np
import torch
from transformers import XLMRobertaTokenizer
from diffusers import (
    AltDiffusionImg2ImgPipeline,
    AutoencoderKL,
    PNDMScheduler,
    UNet2DConditionModel,
)
from diffusers.image_processor import VaeImageProcessor
from diffusers.pipelines.alt_diffusion.modeling_roberta_series import (
RobertaSeriesConfig,
RobertaSeriesModelWithTransformation,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
enable_full_determinism()
class __a ( unittest.TestCase ):
"""simple docstring"""
    def tearDown( self ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
    @property
    def dummy_image( self ):
        batch_size = 1
        num_channels = 3
        sizes = (32, 32)
        image = floats_tensor((batch_size, num_channels) + sizes , rng=random.Random(0 ) ).to(torch_device )
        return image

    @property
    def dummy_cond_unet( self ):
        torch.manual_seed(0 )
        model = UNet2DConditionModel(
            block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('''DownBlock2D''', '''CrossAttnDownBlock2D''') , up_block_types=('''CrossAttnUpBlock2D''', '''UpBlock2D''') , cross_attention_dim=32 , )
        return model

    @property
    def dummy_vae( self ):
        torch.manual_seed(0 )
        model = AutoencoderKL(
            block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['''DownEncoderBlock2D''', '''DownEncoderBlock2D'''] , up_block_types=['''UpDecoderBlock2D''', '''UpDecoderBlock2D'''] , latent_channels=4 , )
        return model

    @property
    def dummy_text_encoder( self ):
        torch.manual_seed(0 )
        config = RobertaSeriesConfig(
            hidden_size=32 , project_dim=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=5006 , )
        return RobertaSeriesModelWithTransformation(config )

    @property
    def dummy_extractor( self ):
        def extract(*args , **kwargs ):
            class Out:
                def __init__( self ):
                    self.pixel_values = torch.ones([0] )

                def to( self , device ):
                    self.pixel_values.to(device )
                    return self

            return Out()

        return extract
def _lowerCAmelCase ( self : Optional[Any] ):
UpperCamelCase__ : str ='''cpu''' # ensure determinism for the device-dependent torch.Generator
UpperCamelCase__ : Any =self.dummy_cond_unet
UpperCamelCase__ : Tuple =PNDMScheduler(skip_prk_steps=lowercase_ )
UpperCamelCase__ : Optional[Any] =self.dummy_vae
UpperCamelCase__ : List[str] =self.dummy_text_encoder
UpperCamelCase__ : List[str] =XLMRobertaTokenizer.from_pretrained('''hf-internal-testing/tiny-xlm-roberta''' )
UpperCamelCase__ : Tuple =77
UpperCamelCase__ : int =self.dummy_image.to(lowercase_ )
UpperCamelCase__ : Tuple =init_image / 2 + 0.5
# make sure here that pndm scheduler skips prk
        UpperCamelCase__ : Union[str, Any] =AltDiffusionImg2ImgPipeline(
unet=lowercase_ , scheduler=lowercase_ , vae=lowercase_ , text_encoder=lowercase_ , tokenizer=lowercase_ , safety_checker=lowercase_ , feature_extractor=self.dummy_extractor , )
UpperCamelCase__ : List[Any] =VaeImageProcessor(vae_scale_factor=alt_pipe.vae_scale_factor , do_normalize=lowercase_ )
UpperCamelCase__ : Union[str, Any] =alt_pipe.to(lowercase_ )
alt_pipe.set_progress_bar_config(disable=lowercase_ )
UpperCamelCase__ : Tuple ='''A painting of a squirrel eating a burger'''
UpperCamelCase__ : str =torch.Generator(device=lowercase_ ).manual_seed(0 )
UpperCamelCase__ : str =alt_pipe(
[prompt] , generator=lowercase_ , guidance_scale=6.0 , num_inference_steps=2 , output_type='''np''' , image=lowercase_ , )
UpperCamelCase__ : Any =output.images
UpperCamelCase__ : Tuple =torch.Generator(device=lowercase_ ).manual_seed(0 )
UpperCamelCase__ : str =alt_pipe(
[prompt] , generator=lowercase_ , guidance_scale=6.0 , num_inference_steps=2 , output_type='''np''' , image=lowercase_ , return_dict=lowercase_ , )[0]
UpperCamelCase__ : Union[str, Any] =image[0, -3:, -3:, -1]
UpperCamelCase__ : int =image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 32, 32, 3)
UpperCamelCase__ : Optional[Any] =np.array([0.4_4_2_7, 0.3_7_3_1, 0.4_2_4_9, 0.4_9_4_1, 0.4_5_4_6, 0.4_1_4_8, 0.4_1_9_3, 0.4_6_6_6, 0.4_4_9_9] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 5e-3
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 5e-3
@unittest.skipIf(torch_device != '''cuda''' , '''This test requires a GPU''' )
def _lowerCAmelCase ( self : str ):
UpperCamelCase__ : List[Any] =self.dummy_cond_unet
UpperCamelCase__ : int =PNDMScheduler(skip_prk_steps=lowercase_ )
UpperCamelCase__ : Optional[Any] =self.dummy_vae
UpperCamelCase__ : Dict =self.dummy_text_encoder
UpperCamelCase__ : Optional[int] =XLMRobertaTokenizer.from_pretrained('''hf-internal-testing/tiny-xlm-roberta''' )
UpperCamelCase__ : List[Any] =77
UpperCamelCase__ : List[Any] =self.dummy_image.to(lowercase_ )
# put models in fp16
UpperCamelCase__ : Dict =unet.half()
UpperCamelCase__ : List[str] =vae.half()
UpperCamelCase__ : int =bert.half()
# make sure here that pndm scheduler skips prk
        UpperCamelCase__ : List[str] =AltDiffusionImg2ImgPipeline(
unet=lowercase_ , scheduler=lowercase_ , vae=lowercase_ , text_encoder=lowercase_ , tokenizer=lowercase_ , safety_checker=lowercase_ , feature_extractor=self.dummy_extractor , )
UpperCamelCase__ : Union[str, Any] =VaeImageProcessor(vae_scale_factor=alt_pipe.vae_scale_factor , do_normalize=lowercase_ )
UpperCamelCase__ : List[Any] =alt_pipe.to(lowercase_ )
alt_pipe.set_progress_bar_config(disable=lowercase_ )
UpperCamelCase__ : Dict ='''A painting of a squirrel eating a burger'''
UpperCamelCase__ : Optional[Any] =torch.manual_seed(0 )
UpperCamelCase__ : str =alt_pipe(
[prompt] , generator=lowercase_ , num_inference_steps=2 , output_type='''np''' , image=lowercase_ , ).images
assert image.shape == (1, 32, 32, 3)
@unittest.skipIf(torch_device != '''cuda''' , '''This test requires a GPU''' )
def _lowerCAmelCase ( self : Union[str, Any] ):
UpperCamelCase__ : str =load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/img2img/sketch-mountains-input.jpg''' )
# resize to resolution that is divisible by 8 but not 16 or 32
UpperCamelCase__ : int =init_image.resize((760, 504) )
UpperCamelCase__ : Optional[int] ='''BAAI/AltDiffusion'''
        UpperCamelCase__ : Union[str, Any] =AltDiffusionImg2ImgPipeline.from_pretrained(
lowercase_ , safety_checker=lowercase_ , )
pipe.to(lowercase_ )
pipe.set_progress_bar_config(disable=lowercase_ )
pipe.enable_attention_slicing()
UpperCamelCase__ : Dict ='''A fantasy landscape, trending on artstation'''
UpperCamelCase__ : str =torch.manual_seed(0 )
UpperCamelCase__ : Any =pipe(
prompt=lowercase_ , image=lowercase_ , strength=0.7_5 , guidance_scale=7.5 , generator=lowercase_ , output_type='''np''' , )
UpperCamelCase__ : List[Any] =output.images[0]
UpperCamelCase__ : int =image[255:258, 383:386, -1]
assert image.shape == (504, 760, 3)
UpperCamelCase__ : Union[str, Any] =np.array([0.9_3_5_8, 0.9_3_9_7, 0.9_5_9_9, 0.9_9_0_1, 1.0_0_0_0, 1.0_0_0_0, 0.9_8_8_2, 1.0_0_0_0, 1.0_0_0_0] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
@slow
@require_torch_gpu
class __a ( unittest.TestCase ):
"""simple docstring"""
    def tearDown( self ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def _lowerCAmelCase ( self : List[Any] ):
UpperCamelCase__ : Tuple =load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/img2img/sketch-mountains-input.jpg''' )
UpperCamelCase__ : List[Any] =init_image.resize((768, 512) )
UpperCamelCase__ : str =load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/img2img/fantasy_landscape_alt.npy''' )
UpperCamelCase__ : List[str] ='''BAAI/AltDiffusion'''
        UpperCamelCase__ : List[str] =AltDiffusionImg2ImgPipeline.from_pretrained(
lowercase_ , safety_checker=lowercase_ , )
pipe.to(lowercase_ )
pipe.set_progress_bar_config(disable=lowercase_ )
pipe.enable_attention_slicing()
UpperCamelCase__ : List[Any] ='''A fantasy landscape, trending on artstation'''
UpperCamelCase__ : List[Any] =torch.manual_seed(0 )
UpperCamelCase__ : int =pipe(
prompt=lowercase_ , image=lowercase_ , strength=0.7_5 , guidance_scale=7.5 , generator=lowercase_ , output_type='''np''' , )
UpperCamelCase__ : List[Any] =output.images[0]
assert image.shape == (512, 768, 3)
# img2img is flaky across GPUs even in fp32, so using MAE here
assert np.abs(expected_image - image ).max() < 1e-2
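# Hedged usage sketch (not part of the tests): the img2img pattern the suite
# verifies end to end. The checkpoint name comes from the tests above; strength
# and guidance values are illustrative.
def _example_img2img_usage(init_image ):
    pipe = AltDiffusionImg2ImgPipeline.from_pretrained('''BAAI/AltDiffusion''' , safety_checker=None )
    pipe.to(torch_device )
    generator = torch.manual_seed(0 )
    output = pipe(
        prompt='''A fantasy landscape, trending on artstation''' , image=init_image , strength=0.75 , guidance_scale=7.5 , generator=generator , output_type='''np''' , )
    return output.images[0]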
| 157 | 0 |
import os
from argparse import ArgumentParser
from typing import List
import torch.utils.data
from datasets import Dataset, IterableDataset
from datasets.distributed import split_dataset_by_node
NUM_SHARDS = 4
NUM_ITEMS_PER_SHARD = 3


class FailedTestError(RuntimeError):
    pass


def gen(shards: List[str]):
    for shard in shards:
        for i in range(NUM_ITEMS_PER_SHARD):
            yield {"i": i, "shard": shard}


def main():
    rank = int(os.environ["RANK"])
    world_size = int(os.environ["WORLD_SIZE"])

    parser = ArgumentParser()
    parser.add_argument("--streaming", type=bool)
    parser.add_argument("--local_rank", type=int)
    parser.add_argument("--num_workers", type=int, default=0)
    args = parser.parse_args()
    streaming = args.streaming
    num_workers = args.num_workers

    gen_kwargs = {"shards": [f"shard_{shard_idx}" for shard_idx in range(NUM_SHARDS)]}
    ds = IterableDataset.from_generator(gen, gen_kwargs=gen_kwargs)
    if not streaming:
        ds = Dataset.from_list(list(ds))

    ds = split_dataset_by_node(ds, rank=rank, world_size=world_size)
    dataloader = torch.utils.data.DataLoader(ds, num_workers=num_workers)

    full_size = NUM_SHARDS * NUM_ITEMS_PER_SHARD
    expected_local_size = full_size // world_size
    expected_local_size += int(rank < (full_size % world_size))

    local_size = sum(1 for _ in dataloader)
    if local_size != expected_local_size:
        raise FailedTestError(f"local_size {local_size} != expected_local_size {expected_local_size}")
if __name__ == "__main__":
main()
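# Hedged notes (not in the original script): this file is meant to be launched
# once per process by a distributed launcher, e.g.
#   torchrun --nproc_per_node=2 this_script.py --streaming True
# (launcher and flags illustrative). The helper below shows the splitting
# contract in-process: the per-rank shards partition the dataset exactly.
def _example_split_by_node():
    ds = Dataset.from_list([{"i": i} for i in range(10 )] )
    shard_a = split_dataset_by_node(ds , rank=0 , world_size=2 )
    shard_b = split_dataset_by_node(ds , rank=1 , world_size=2 )
    assert len(shard_a ) + len(shard_b ) == len(ds )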
| 281 |
import unittest
import numpy as np
from transformers import RobertaPreLayerNormConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
if is_flax_available():
import jax.numpy as jnp
from transformers.models.roberta_prelayernorm.modeling_flax_roberta_prelayernorm import (
FlaxRobertaPreLayerNormForCausalLM,
FlaxRobertaPreLayerNormForMaskedLM,
FlaxRobertaPreLayerNormForMultipleChoice,
FlaxRobertaPreLayerNormForQuestionAnswering,
FlaxRobertaPreLayerNormForSequenceClassification,
FlaxRobertaPreLayerNormForTokenClassification,
FlaxRobertaPreLayerNormModel,
)
class FlaxRobertaPreLayerNormModelTester(unittest.TestCase):
    def __init__(self, parent, batch_size=13, seq_length=7, is_training=True, use_attention_mask=True, use_token_type_ids=True, use_labels=True, vocab_size=99, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02, num_choices=4):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_choices = num_choices
    def prepare_config_and_inputs( self ):
        input_ids = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        attention_mask = None
        if self.use_attention_mask:
            attention_mask = random_attention_mask([self.batch_size, self.seq_length] )
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
        config = RobertaPreLayerNormConfig(
            vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=False , initializer_range=self.initializer_range , )
        return config, input_ids, token_type_ids, attention_mask
    def prepare_config_and_inputs_for_common( self ):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, token_type_ids, attention_mask = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": attention_mask}
        return config, inputs_dict

    def prepare_config_and_inputs_for_decoder( self ):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, token_type_ids, attention_mask = config_and_inputs
        config.is_decoder = True
        encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size] )
        encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 )
        return (
            config,
            input_ids,
            token_type_ids,
            encoder_hidden_states,
            encoder_attention_mask,
        )
@require_flax
# Copied from tests.models.roberta.test_modelling_flax_roberta.FlaxRobertaPreLayerNormModelTest with ROBERTA->ROBERTA_PRELAYERNORM,Roberta->RobertaPreLayerNorm,roberta-base->andreasmadsen/efficient_mlm_m0.40
class FlaxRobertaPreLayerNormModelTest(FlaxModelTesterMixin , unittest.TestCase):
    test_head_masking = True
    all_model_classes = (
(
FlaxRobertaPreLayerNormModel,
FlaxRobertaPreLayerNormForCausalLM,
FlaxRobertaPreLayerNormForMaskedLM,
FlaxRobertaPreLayerNormForSequenceClassification,
FlaxRobertaPreLayerNormForTokenClassification,
FlaxRobertaPreLayerNormForMultipleChoice,
FlaxRobertaPreLayerNormForQuestionAnswering,
)
if is_flax_available()
else ()
)
    def setUp( self ):
        self.model_tester = FlaxRobertaPreLayerNormModelTester(self )

    @slow
    def test_model_from_pretrained( self ):
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("andreasmadsen/efficient_mlm_m0.40" , from_pt=True )
            outputs = model(np.ones((1, 1) ) )
            self.assertIsNotNone(outputs )


@require_flax
class FlaxRobertaPreLayerNormModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_masked_lm( self ):
        model = FlaxRobertaPreLayerNormForMaskedLM.from_pretrained("andreasmadsen/efficient_mlm_m0.40" , from_pt=True )
        input_ids = np.array([[0, 31_414, 232, 328, 740, 1_140, 12_695, 69, 46_078, 1_588, 2]] , dtype=jnp.int32 )
        output = model(input_ids )[0]
        expected_shape = [1, 11, 50_265]
        self.assertEqual(list(output.shape ) , expected_shape )
        # compare the actual values for a slice.
        expected_slice = np.array(
            [[[40.48_80, 18.01_99, -5.23_67], [-1.88_77, -4.08_85, 10.70_85], [-2.26_13, -5.61_10, 7.26_65]]] , dtype=np.float32 )
        self.assertTrue(np.allclose(output[:, :3, :3] , expected_slice , atol=1e-4 ) )

    @slow
    def test_inference_no_head( self ):
        model = FlaxRobertaPreLayerNormModel.from_pretrained("andreasmadsen/efficient_mlm_m0.40" , from_pt=True )
        input_ids = np.array([[0, 31_414, 232, 328, 740, 1_140, 12_695, 69, 46_078, 1_588, 2]] , dtype=jnp.int32 )
        output = model(input_ids )[0]
        # compare the actual values for a slice.
        expected_slice = np.array(
            [[[0.02_08, -0.03_56, 0.02_37], [-0.15_69, -0.04_11, -0.26_26], [0.18_79, 0.01_25, -0.00_89]]] , dtype=np.float32 )
        self.assertTrue(np.allclose(output[:, :3, :3] , expected_slice , atol=1e-4 ) )
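# Hedged sketch (not in the original tests): minimal Flax inference with the
# checkpoint used above; the input ids are arbitrary placeholders.
def _example_flax_inference():
    model = FlaxRobertaPreLayerNormModel.from_pretrained("andreasmadsen/efficient_mlm_m0.40" , from_pt=True )
    input_ids = np.ones((1, 11) , dtype="i4" )
    hidden_states = model(input_ids )[0]
    return hidden_states.shape  # (1, 11, hidden_size)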
| 281 | 1 |
'''simple docstring'''
import json
import os
import unittest
from transformers.models.roc_bert.tokenization_roc_bert import (
VOCAB_FILES_NAMES,
RoCBertBasicTokenizer,
RoCBertTokenizer,
RoCBertWordpieceTokenizer,
_is_control,
_is_punctuation,
_is_whitespace,
)
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin, filter_non_english
@require_tokenizers
class UpperCamelCase_ (TokenizerTesterMixin, unittest.TestCase ):
"""simple docstring"""
_lowerCAmelCase = RoCBertTokenizer
_lowerCAmelCase = None
_lowerCAmelCase = False
_lowerCAmelCase = True
_lowerCAmelCase = filter_non_english
    def setUp( self ):
        super().setUp()
        vocab_tokens = ['''[UNK]''', '''[CLS]''', '''[SEP]''', '''[PAD]''', '''[MASK]''', '''你''', '''好''', '''是''', '''谁''', '''a''', '''b''', '''c''', '''d''']
        word_shape = {}
        word_pronunciation = {}
        for i, value in enumerate(vocab_tokens ):
            word_shape[value] = i
            word_pronunciation[value] = i
        self.vocab_file = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
        self.word_shape_file = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''word_shape_file'''] )
        self.word_pronunciation_file = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''word_pronunciation_file'''] )
        with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as vocab_writer:
            vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens] ) )
        with open(self.word_shape_file , '''w''' , encoding='''utf-8''' ) as word_shape_writer:
            json.dump(word_shape , word_shape_writer , ensure_ascii=False )
        with open(self.word_pronunciation_file , '''w''' , encoding='''utf-8''' ) as word_pronunciation_writer:
            json.dump(word_pronunciation , word_pronunciation_writer , ensure_ascii=False )
    def test_full_tokenizer( self ):
        tokenizer = self.tokenizer_class(self.vocab_file , self.word_shape_file , self.word_pronunciation_file )
        tokens = tokenizer.tokenize('''你好[SEP]你是谁''' )
        self.assertListEqual(tokens , ['''你''', '''好''', '''[SEP]''', '''你''', '''是''', '''谁'''] )
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens ) , [5, 6, 2, 5, 7, 8] )
        self.assertListEqual(tokenizer.convert_tokens_to_shape_ids(tokens ) , [5, 6, 2, 5, 7, 8] )
        self.assertListEqual(tokenizer.convert_tokens_to_pronunciation_ids(tokens ) , [5, 6, 2, 5, 7, 8] )
def _a ( self : List[str] ):
"""simple docstring"""
A_ : Optional[Any] = RoCBertBasicTokenizer()
self.assertListEqual(tokenizer.tokenize('''ah\u535A\u63A8zz''' ) , ['''ah''', '''\u535A''', '''\u63A8''', '''zz'''] )
def _a ( self : Dict ):
"""simple docstring"""
A_ : str = RoCBertBasicTokenizer(do_lower_case=_lowerCamelCase )
self.assertListEqual(
tokenizer.tokenize(''' \tHeLLo!how \n Are yoU? ''' ) , ['''hello''', '''!''', '''how''', '''are''', '''you''', '''?'''] )
self.assertListEqual(tokenizer.tokenize('''H\u00E9llo''' ) , ['''hello'''] )
def _a ( self : Any ):
"""simple docstring"""
A_ : str = RoCBertBasicTokenizer(do_lower_case=_lowerCamelCase , strip_accents=_lowerCamelCase )
self.assertListEqual(
tokenizer.tokenize(''' \tHäLLo!how \n Are yoU? ''' ) , ['''hällo''', '''!''', '''how''', '''are''', '''you''', '''?'''] )
self.assertListEqual(tokenizer.tokenize('''H\u00E9llo''' ) , ['''h\u00E9llo'''] )
def _a ( self : Any ):
"""simple docstring"""
A_ : Union[str, Any] = RoCBertBasicTokenizer(do_lower_case=_lowerCamelCase , strip_accents=_lowerCamelCase )
self.assertListEqual(
tokenizer.tokenize(''' \tHäLLo!how \n Are yoU? ''' ) , ['''hallo''', '''!''', '''how''', '''are''', '''you''', '''?'''] )
self.assertListEqual(tokenizer.tokenize('''H\u00E9llo''' ) , ['''hello'''] )
def _a ( self : Optional[int] ):
"""simple docstring"""
A_ : str = RoCBertBasicTokenizer(do_lower_case=_lowerCamelCase )
self.assertListEqual(
tokenizer.tokenize(''' \tHäLLo!how \n Are yoU? ''' ) , ['''hallo''', '''!''', '''how''', '''are''', '''you''', '''?'''] )
self.assertListEqual(tokenizer.tokenize('''H\u00E9llo''' ) , ['''hello'''] )
def _a ( self : Union[str, Any] ):
"""simple docstring"""
A_ : Dict = RoCBertBasicTokenizer(do_lower_case=_lowerCamelCase )
self.assertListEqual(
tokenizer.tokenize(''' \tHeLLo!how \n Are yoU? ''' ) , ['''HeLLo''', '''!''', '''how''', '''Are''', '''yoU''', '''?'''] )
def _a ( self : Any ):
"""simple docstring"""
A_ : Tuple = RoCBertBasicTokenizer(do_lower_case=_lowerCamelCase , strip_accents=_lowerCamelCase )
self.assertListEqual(
tokenizer.tokenize(''' \tHäLLo!how \n Are yoU? ''' ) , ['''HäLLo''', '''!''', '''how''', '''Are''', '''yoU''', '''?'''] )
def _a ( self : Optional[int] ):
"""simple docstring"""
A_ : Optional[int] = RoCBertBasicTokenizer(do_lower_case=_lowerCamelCase , strip_accents=_lowerCamelCase )
self.assertListEqual(
tokenizer.tokenize(''' \tHäLLo!how \n Are yoU? ''' ) , ['''HaLLo''', '''!''', '''how''', '''Are''', '''yoU''', '''?'''] )
def _a ( self : Optional[Any] ):
"""simple docstring"""
A_ : Optional[int] = RoCBertBasicTokenizer(do_lower_case=_lowerCamelCase , never_split=['''[UNK]'''] )
self.assertListEqual(
tokenizer.tokenize(''' \tHeLLo!how \n Are yoU? [UNK]''' ) , ['''HeLLo''', '''!''', '''how''', '''Are''', '''yoU''', '''?''', '''[UNK]'''] )
def _a ( self : Any ):
"""simple docstring"""
A_ : int = ['''[UNK]''', '''[CLS]''', '''[SEP]''', '''want''', '''##want''', '''##ed''', '''wa''', '''un''', '''runn''', '''##ing''']
A_ : Union[str, Any] = {}
for i, token in enumerate(_lowerCamelCase ):
A_ : Dict = i
A_ : List[str] = RoCBertWordpieceTokenizer(vocab=_lowerCamelCase , unk_token='''[UNK]''' )
self.assertListEqual(tokenizer.tokenize('''''' ) , [] )
self.assertListEqual(tokenizer.tokenize('''unwanted running''' ) , ['''un''', '''##want''', '''##ed''', '''runn''', '''##ing'''] )
self.assertListEqual(tokenizer.tokenize('''unwantedX running''' ) , ['''[UNK]''', '''runn''', '''##ing'''] )
def _a ( self : int ):
"""simple docstring"""
self.assertTrue(_is_whitespace(''' ''' ) )
self.assertTrue(_is_whitespace('''\t''' ) )
self.assertTrue(_is_whitespace('''\r''' ) )
self.assertTrue(_is_whitespace('''\n''' ) )
self.assertTrue(_is_whitespace('''\u00A0''' ) )
self.assertFalse(_is_whitespace('''A''' ) )
self.assertFalse(_is_whitespace('''-''' ) )
def _a ( self : Tuple ):
"""simple docstring"""
self.assertTrue(_is_control('''\u0005''' ) )
self.assertFalse(_is_control('''A''' ) )
self.assertFalse(_is_control(''' ''' ) )
self.assertFalse(_is_control('''\t''' ) )
self.assertFalse(_is_control('''\r''' ) )
def _a ( self : Tuple ):
"""simple docstring"""
self.assertTrue(_is_punctuation('''-''' ) )
self.assertTrue(_is_punctuation('''$''' ) )
self.assertTrue(_is_punctuation('''`''' ) )
self.assertTrue(_is_punctuation('''.''' ) )
self.assertFalse(_is_punctuation('''A''' ) )
self.assertFalse(_is_punctuation(''' ''' ) )
def _a ( self : int ):
"""simple docstring"""
A_ : Any = self.get_tokenizer()
# Example taken from the issue https://github.com/huggingface/tokenizers/issues/340
self.assertListEqual([tokenizer.tokenize(_lowerCamelCase ) for t in ['''Test''', '''\xad''', '''test''']] , [['''[UNK]'''], [], ['''[UNK]''']] )
if self.test_rust_tokenizer:
A_ : List[Any] = self.get_rust_tokenizer()
self.assertListEqual(
[rust_tokenizer.tokenize(_lowerCamelCase ) for t in ['''Test''', '''\xad''', '''test''']] , [['''[UNK]'''], [], ['''[UNK]''']] )
def _a ( self : Optional[int] ):
"""simple docstring"""
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f'{tokenizer.__class__.__name__} ({pretrained_name})' ):
A_ : List[Any] = self.rust_tokenizer_class.from_pretrained(_lowerCamelCase , **_lowerCamelCase )
A_ : Any = f'A, naïve {tokenizer_r.mask_token} AllenNLP sentence.'
A_ : Dict = tokenizer_r.encode_plus(
_lowerCamelCase , return_attention_mask=_lowerCamelCase , return_token_type_ids=_lowerCamelCase , return_offsets_mapping=_lowerCamelCase , add_special_tokens=_lowerCamelCase , )
A_ : int = tokenizer_r.do_lower_case if hasattr(_lowerCamelCase , '''do_lower_case''' ) else False
A_ : int = (
[
((0, 0), tokenizer_r.cls_token),
((0, 1), '''A'''),
((1, 2), ''','''),
((3, 5), '''na'''),
((5, 6), '''##ï'''),
((6, 8), '''##ve'''),
((9, 15), tokenizer_r.mask_token),
((16, 21), '''Allen'''),
((21, 23), '''##NL'''),
((23, 24), '''##P'''),
((25, 33), '''sentence'''),
((33, 34), '''.'''),
((0, 0), tokenizer_r.sep_token),
]
if not do_lower_case
else [
((0, 0), tokenizer_r.cls_token),
((0, 1), '''a'''),
((1, 2), ''','''),
((3, 8), '''naive'''),
((9, 15), tokenizer_r.mask_token),
((16, 21), '''allen'''),
((21, 23), '''##nl'''),
((23, 24), '''##p'''),
((25, 33), '''sentence'''),
((33, 34), '''.'''),
((0, 0), tokenizer_r.sep_token),
]
)
self.assertEqual(
[e[1] for e in expected_results] , tokenizer_r.convert_ids_to_tokens(tokens['''input_ids'''] ) )
self.assertEqual([e[0] for e in expected_results] , tokens['''offset_mapping'''] )
def _a ( self : int ):
"""simple docstring"""
A_ : Tuple = ['''的''', '''人''', '''有''']
A_ : Optional[int] = ''''''.join(_lowerCamelCase )
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f'{tokenizer.__class__.__name__} ({pretrained_name})' ):
A_ : List[str] = True
A_ : List[Any] = self.tokenizer_class.from_pretrained(_lowerCamelCase , **_lowerCamelCase )
A_ : Optional[Any] = self.rust_tokenizer_class.from_pretrained(_lowerCamelCase , **_lowerCamelCase )
A_ : str = tokenizer_p.encode(_lowerCamelCase , add_special_tokens=_lowerCamelCase )
A_ : Union[str, Any] = tokenizer_r.encode(_lowerCamelCase , add_special_tokens=_lowerCamelCase )
A_ : str = tokenizer_r.convert_ids_to_tokens(_lowerCamelCase )
A_ : Dict = tokenizer_p.convert_ids_to_tokens(_lowerCamelCase )
# it is expected that each Chinese character is not preceded by "##"
self.assertListEqual(_lowerCamelCase , _lowerCamelCase )
self.assertListEqual(_lowerCamelCase , _lowerCamelCase )
A_ : List[str] = False
A_ : List[Any] = self.rust_tokenizer_class.from_pretrained(_lowerCamelCase , **_lowerCamelCase )
A_ : int = self.tokenizer_class.from_pretrained(_lowerCamelCase , **_lowerCamelCase )
A_ : Any = tokenizer_r.encode(_lowerCamelCase , add_special_tokens=_lowerCamelCase )
A_ : Union[str, Any] = tokenizer_p.encode(_lowerCamelCase , add_special_tokens=_lowerCamelCase )
A_ : Optional[int] = tokenizer_r.convert_ids_to_tokens(_lowerCamelCase )
A_ : List[Any] = tokenizer_p.convert_ids_to_tokens(_lowerCamelCase )
# it is expected that only the first Chinese character is not preceded by "##".
A_ : Optional[Any] = [
f'##{token}' if idx != 0 else token for idx, token in enumerate(_lowerCamelCase )
]
self.assertListEqual(_lowerCamelCase , _lowerCamelCase )
self.assertListEqual(_lowerCamelCase , _lowerCamelCase )
@slow
    def test_sequence_builders( self ):
        tokenizer = self.tokenizer_class(self.vocab_file , self.word_shape_file , self.word_pronunciation_file )
        text = tokenizer.encode('''你好''' , add_special_tokens=False )
        text_a = tokenizer.encode('''你是谁''' , add_special_tokens=False )
        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text )
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text , text_a )
        assert encoded_sentence == [1] + text + [2]
        assert encoded_pair == [1] + text + [2] + text_a + [2]
def _a ( self : Optional[Any] ):
"""simple docstring"""
A_ : Optional[Any] = self.get_tokenizers(do_lower_case=_lowerCamelCase )
for tokenizer in tokenizers:
with self.subTest(f'{tokenizer.__class__.__name__}' ):
A_ : Dict = '''你好,你是谁'''
A_ : List[Any] = tokenizer.tokenize(_lowerCamelCase )
A_ : Union[str, Any] = tokenizer.convert_tokens_to_ids(_lowerCamelCase )
A_ : int = tokenizer.convert_tokens_to_shape_ids(_lowerCamelCase )
A_ : Any = tokenizer.convert_tokens_to_pronunciation_ids(_lowerCamelCase )
A_ : Any = tokenizer.prepare_for_model(
_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , add_special_tokens=_lowerCamelCase )
A_ : Optional[int] = tokenizer.encode_plus(_lowerCamelCase , add_special_tokens=_lowerCamelCase )
self.assertEqual(_lowerCamelCase , _lowerCamelCase )
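# Hedged sketch (not in the original tests): what distinguishes RoCBert -- each
# token maps to three parallel id sequences (token / shape / pronunciation),
# which is exactly what `prepare_for_model` consumes in the test above.
def _example_triple_ids(tokenizer ):
    tokens = tokenizer.tokenize('''你好''' )
    ids = tokenizer.convert_tokens_to_ids(tokens )
    shape_ids = tokenizer.convert_tokens_to_shape_ids(tokens )
    pron_ids = tokenizer.convert_tokens_to_pronunciation_ids(tokens )
    return ids, shape_ids, pron_ids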
| 350 |
'''simple docstring'''
import argparse
from collections import OrderedDict
from pathlib import Path
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from torchvision.transforms import functional as F
from transformers import DetrImageProcessor, TableTransformerConfig, TableTransformerForObjectDetection
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

# here we list all keys to be renamed (original name on the left, our name on the right)
rename_keys = []
for i in range(6):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append(
(F'transformer.encoder.layers.{i}.self_attn.out_proj.weight', F'encoder.layers.{i}.self_attn.out_proj.weight')
)
rename_keys.append(
(F'transformer.encoder.layers.{i}.self_attn.out_proj.bias', F'encoder.layers.{i}.self_attn.out_proj.bias')
)
rename_keys.append((F'transformer.encoder.layers.{i}.linear1.weight', F'encoder.layers.{i}.fc1.weight'))
rename_keys.append((F'transformer.encoder.layers.{i}.linear1.bias', F'encoder.layers.{i}.fc1.bias'))
rename_keys.append((F'transformer.encoder.layers.{i}.linear2.weight', F'encoder.layers.{i}.fc2.weight'))
rename_keys.append((F'transformer.encoder.layers.{i}.linear2.bias', F'encoder.layers.{i}.fc2.bias'))
rename_keys.append(
(F'transformer.encoder.layers.{i}.norm1.weight', F'encoder.layers.{i}.self_attn_layer_norm.weight')
)
rename_keys.append((F'transformer.encoder.layers.{i}.norm1.bias', F'encoder.layers.{i}.self_attn_layer_norm.bias'))
rename_keys.append((F'transformer.encoder.layers.{i}.norm2.weight', F'encoder.layers.{i}.final_layer_norm.weight'))
rename_keys.append((F'transformer.encoder.layers.{i}.norm2.bias', F'encoder.layers.{i}.final_layer_norm.bias'))
# decoder layers: 2 times output projection, 2 feedforward neural networks and 3 layernorms
rename_keys.append(
(F'transformer.decoder.layers.{i}.self_attn.out_proj.weight', F'decoder.layers.{i}.self_attn.out_proj.weight')
)
rename_keys.append(
(F'transformer.decoder.layers.{i}.self_attn.out_proj.bias', F'decoder.layers.{i}.self_attn.out_proj.bias')
)
rename_keys.append(
(
F'transformer.decoder.layers.{i}.multihead_attn.out_proj.weight',
F'decoder.layers.{i}.encoder_attn.out_proj.weight',
)
)
rename_keys.append(
(
F'transformer.decoder.layers.{i}.multihead_attn.out_proj.bias',
F'decoder.layers.{i}.encoder_attn.out_proj.bias',
)
)
rename_keys.append((F'transformer.decoder.layers.{i}.linear1.weight', F'decoder.layers.{i}.fc1.weight'))
rename_keys.append((F'transformer.decoder.layers.{i}.linear1.bias', F'decoder.layers.{i}.fc1.bias'))
rename_keys.append((F'transformer.decoder.layers.{i}.linear2.weight', F'decoder.layers.{i}.fc2.weight'))
rename_keys.append((F'transformer.decoder.layers.{i}.linear2.bias', F'decoder.layers.{i}.fc2.bias'))
rename_keys.append(
(F'transformer.decoder.layers.{i}.norm1.weight', F'decoder.layers.{i}.self_attn_layer_norm.weight')
)
rename_keys.append((F'transformer.decoder.layers.{i}.norm1.bias', F'decoder.layers.{i}.self_attn_layer_norm.bias'))
rename_keys.append(
(F'transformer.decoder.layers.{i}.norm2.weight', F'decoder.layers.{i}.encoder_attn_layer_norm.weight')
)
rename_keys.append(
(F'transformer.decoder.layers.{i}.norm2.bias', F'decoder.layers.{i}.encoder_attn_layer_norm.bias')
)
rename_keys.append((F'transformer.decoder.layers.{i}.norm3.weight', F'decoder.layers.{i}.final_layer_norm.weight'))
rename_keys.append((F'transformer.decoder.layers.{i}.norm3.bias', F'decoder.layers.{i}.final_layer_norm.bias'))
# convolutional projection + query embeddings + layernorm of encoder + layernorm of decoder + class and bounding box heads
rename_keys.extend(
[
("""input_proj.weight""", """input_projection.weight"""),
("""input_proj.bias""", """input_projection.bias"""),
("""query_embed.weight""", """query_position_embeddings.weight"""),
("""transformer.encoder.norm.weight""", """encoder.layernorm.weight"""),
("""transformer.encoder.norm.bias""", """encoder.layernorm.bias"""),
("""transformer.decoder.norm.weight""", """decoder.layernorm.weight"""),
("""transformer.decoder.norm.bias""", """decoder.layernorm.bias"""),
("""class_embed.weight""", """class_labels_classifier.weight"""),
("""class_embed.bias""", """class_labels_classifier.bias"""),
("""bbox_embed.layers.0.weight""", """bbox_predictor.layers.0.weight"""),
("""bbox_embed.layers.0.bias""", """bbox_predictor.layers.0.bias"""),
("""bbox_embed.layers.1.weight""", """bbox_predictor.layers.1.weight"""),
("""bbox_embed.layers.1.bias""", """bbox_predictor.layers.1.bias"""),
("""bbox_embed.layers.2.weight""", """bbox_predictor.layers.2.weight"""),
("""bbox_embed.layers.2.bias""", """bbox_predictor.layers.2.bias"""),
]
)
def rename_key(state_dict, old, new ):
    val = state_dict.pop(old )
    state_dict[new] = val


def rename_backbone_keys(state_dict ):
    new_state_dict = OrderedDict()
    for key, value in state_dict.items():
        if "backbone.0.body" in key:
            new_key = key.replace('''backbone.0.body''' , '''backbone.conv_encoder.model''' )
            new_state_dict[new_key] = value
        else:
            new_state_dict[key] = value
    return new_state_dict
def read_in_q_k_v(state_dict ):
    prefix = ''''''
    # first: transformer encoder
    for i in range(6 ):
        # read in weights + bias of input projection layer (in PyTorch's MultiHeadAttention, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f'{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_weight' )
        in_proj_bias = state_dict.pop(f'{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_bias' )
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f'encoder.layers.{i}.self_attn.q_proj.weight'] = in_proj_weight[:256, :]
        state_dict[f'encoder.layers.{i}.self_attn.q_proj.bias'] = in_proj_bias[:256]
        state_dict[f'encoder.layers.{i}.self_attn.k_proj.weight'] = in_proj_weight[256:512, :]
        state_dict[f'encoder.layers.{i}.self_attn.k_proj.bias'] = in_proj_bias[256:512]
        state_dict[f'encoder.layers.{i}.self_attn.v_proj.weight'] = in_proj_weight[-256:, :]
        state_dict[f'encoder.layers.{i}.self_attn.v_proj.bias'] = in_proj_bias[-256:]
    # next: transformer decoder (which is a bit more complex because it also includes cross-attention)
    for i in range(6 ):
        # read in weights + bias of input projection layer of self-attention
        in_proj_weight = state_dict.pop(f'{prefix}transformer.decoder.layers.{i}.self_attn.in_proj_weight' )
        in_proj_bias = state_dict.pop(f'{prefix}transformer.decoder.layers.{i}.self_attn.in_proj_bias' )
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f'decoder.layers.{i}.self_attn.q_proj.weight'] = in_proj_weight[:256, :]
        state_dict[f'decoder.layers.{i}.self_attn.q_proj.bias'] = in_proj_bias[:256]
        state_dict[f'decoder.layers.{i}.self_attn.k_proj.weight'] = in_proj_weight[256:512, :]
        state_dict[f'decoder.layers.{i}.self_attn.k_proj.bias'] = in_proj_bias[256:512]
        state_dict[f'decoder.layers.{i}.self_attn.v_proj.weight'] = in_proj_weight[-256:, :]
        state_dict[f'decoder.layers.{i}.self_attn.v_proj.bias'] = in_proj_bias[-256:]
        # read in weights + bias of input projection layer of cross-attention
        in_proj_weight_cross_attn = state_dict.pop(
            f'{prefix}transformer.decoder.layers.{i}.multihead_attn.in_proj_weight' )
        in_proj_bias_cross_attn = state_dict.pop(f'{prefix}transformer.decoder.layers.{i}.multihead_attn.in_proj_bias' )
        # next, add query, keys and values (in that order) of cross-attention to the state dict
        state_dict[f'decoder.layers.{i}.encoder_attn.q_proj.weight'] = in_proj_weight_cross_attn[:256, :]
        state_dict[f'decoder.layers.{i}.encoder_attn.q_proj.bias'] = in_proj_bias_cross_attn[:256]
        state_dict[f'decoder.layers.{i}.encoder_attn.k_proj.weight'] = in_proj_weight_cross_attn[256:512, :]
        state_dict[f'decoder.layers.{i}.encoder_attn.k_proj.bias'] = in_proj_bias_cross_attn[256:512]
        state_dict[f'decoder.layers.{i}.encoder_attn.v_proj.weight'] = in_proj_weight_cross_attn[-256:, :]
        state_dict[f'decoder.layers.{i}.encoder_attn.v_proj.bias'] = in_proj_bias_cross_attn[-256:]
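# Clarifying note (added; not in the original script): each fused `in_proj`
# matrix has shape (3 * 256, 256) = (768, 256). Rows 0:256 hold the query
# projection, rows 256:512 the key projection, and the final 256 rows the value
# projection, which is why the slices above map onto HF's separate q/k/v layers.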
def resize(image, checkpoint_url ):
    width, height = image.size
    current_max_size = max(width, height )
    target_max_size = 800 if '''detection''' in checkpoint_url else 1000
    scale = target_max_size / current_max_size
    resized_image = image.resize((int(round(scale * width ) ), int(round(scale * height ) )) )
    return resized_image


def normalize(image ):
    image = F.to_tensor(image )
    image = F.normalize(image , mean=[0.485, 0.456, 0.406] , std=[0.229, 0.224, 0.225] )
    return image
@torch.no_grad()
def convert_table_transformer_checkpoint(checkpoint_url, pytorch_dump_folder_path, push_to_hub ):
    logger.info('''Converting model...''' )
    # load original state dict
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url , map_location='''cpu''' )
    # rename keys
    for src, dest in rename_keys:
        rename_key(state_dict , src , dest )
    state_dict = rename_backbone_keys(state_dict )
    # query, key and value matrices need special treatment
    read_in_q_k_v(state_dict )
    # important: we need to prepend a prefix to each of the base model keys as the head models use different attributes for them
    prefix = '''model.'''
    for key in state_dict.copy().keys():
        if not key.startswith('''class_labels_classifier''' ) and not key.startswith('''bbox_predictor''' ):
            val = state_dict.pop(key )
            state_dict[prefix + key] = val
    # create HuggingFace model and load state dict
    config = TableTransformerConfig(
        backbone='''resnet18''' , mask_loss_coefficient=1 , dice_loss_coefficient=1 , ce_loss_coefficient=1 , bbox_loss_coefficient=5 , giou_loss_coefficient=2 , eos_coefficient=0.4 , class_cost=1 , bbox_cost=5 , giou_cost=2 , )
    if '''detection''' in checkpoint_url:
        config.num_queries = 15
        config.num_labels = 2
        id2label = {0: '''table''', 1: '''table rotated'''}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}
    else:
        config.num_queries = 125
        config.num_labels = 6
        id2label = {
            0: '''table''',
            1: '''table column''',
            2: '''table row''',
            3: '''table column header''',
            4: '''table projected row header''',
            5: '''table spanning cell''',
        }
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}
    image_processor = DetrImageProcessor(
        format='''coco_detection''' , max_size=800 if '''detection''' in checkpoint_url else 1000 )
    model = TableTransformerForObjectDetection(config )
    model.load_state_dict(state_dict )
    model.eval()
    # verify our conversion
    filename = '''example_pdf.png''' if '''detection''' in checkpoint_url else '''example_table.png'''
    file_path = hf_hub_download(repo_id='''nielsr/example-pdf''' , repo_type='''dataset''' , filename=filename )
    image = Image.open(file_path ).convert('''RGB''' )
    pixel_values = normalize(resize(image , checkpoint_url ) ).unsqueeze(0 )
    outputs = model(pixel_values )
    if '''detection''' in checkpoint_url:
        expected_shape = (1, 15, 3)
        expected_logits = torch.tensor(
            [[-6.7897, -16.9985, 6.7937], [-8.0186, -22.2192, 6.9677], [-7.3117, -21.0708, 7.4055]] )
        expected_boxes = torch.tensor([[0.4867, 0.1767, 0.6732], [0.6718, 0.4479, 0.3830], [0.4716, 0.1760, 0.6364]] )
    else:
        expected_shape = (1, 125, 7)
        expected_logits = torch.tensor(
            [[-18.1430, -8.3214, 4.8274], [-18.4685, -7.1361, -4.2667], [-26.3693, -9.3429, -4.9962]] )
        expected_boxes = torch.tensor([[0.4983, 0.5595, 0.9440], [0.4916, 0.6315, 0.5954], [0.6108, 0.8637, 0.1135]] )
    assert outputs.logits.shape == expected_shape
    assert torch.allclose(outputs.logits[0, :3, :3] , expected_logits , atol=1e-4 )
    assert torch.allclose(outputs.pred_boxes[0, :3, :3] , expected_boxes , atol=1e-4 )
    print('''Looks ok!''' )
    if pytorch_dump_folder_path is not None:
        # Save model and image processor
        logger.info(f'Saving PyTorch model and image processor to {pytorch_dump_folder_path}...' )
        Path(pytorch_dump_folder_path ).mkdir(exist_ok=True )
        model.save_pretrained(pytorch_dump_folder_path )
        image_processor.save_pretrained(pytorch_dump_folder_path )
    if push_to_hub:
        # Push model to HF hub
        logger.info('''Pushing model to the hub...''' )
        model_name = (
            '''microsoft/table-transformer-detection'''
            if '''detection''' in checkpoint_url
            else '''microsoft/table-transformer-structure-recognition'''
        )
        model.push_to_hub(model_name )
        image_processor.push_to_hub(model_name )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
"""--checkpoint_url""",
default="""https://pubtables1m.blob.core.windows.net/model/pubtables1m_detection_detr_r18.pth""",
type=str,
choices=[
"""https://pubtables1m.blob.core.windows.net/model/pubtables1m_detection_detr_r18.pth""",
"""https://pubtables1m.blob.core.windows.net/model/pubtables1m_structure_detr_r18.pth""",
],
help="""URL of the Table Transformer checkpoint you'd like to convert.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the folder to output PyTorch model."""
)
parser.add_argument(
"""--push_to_hub""", action="""store_true""", help="""Whether or not to push the converted model to the 🤗 hub."""
)
snake_case__ = parser.parse_args()
convert_table_transformer_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub)
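# Usage sketch (the script filename below is hypothetical; the URLs are the two
# checkpoints accepted above). Passing the structure checkpoint instead of the
# detection one switches the label set from 2 to 6 classes and the resize max_size
# from 800 to 1000, as handled in the conversion function.
#
#   python convert_table_transformer_checkpoint.py \
#       --checkpoint_url https://pubtables1m.blob.core.windows.net/model/pubtables1m_detection_detr_r18.pth \
#       --pytorch_dump_folder_path ./table-transformer-detection \
#       --push_to_hub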
| 4 | 0 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
'''configuration_convbert''': ['''CONVBERT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''ConvBertConfig''', '''ConvBertOnnxConfig'''],
'''tokenization_convbert''': ['''ConvBertTokenizer'''],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''tokenization_convbert_fast'''] = ['''ConvBertTokenizerFast''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_convbert'''] = [
'''CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''ConvBertForMaskedLM''',
'''ConvBertForMultipleChoice''',
'''ConvBertForQuestionAnswering''',
'''ConvBertForSequenceClassification''',
'''ConvBertForTokenClassification''',
'''ConvBertLayer''',
'''ConvBertModel''',
'''ConvBertPreTrainedModel''',
'''load_tf_weights_in_convbert''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_tf_convbert'''] = [
'''TF_CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFConvBertForMaskedLM''',
'''TFConvBertForMultipleChoice''',
'''TFConvBertForQuestionAnswering''',
'''TFConvBertForSequenceClassification''',
'''TFConvBertForTokenClassification''',
'''TFConvBertLayer''',
'''TFConvBertModel''',
'''TFConvBertPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_convbert import CONVBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, ConvBertConfig, ConvBertOnnxConfig
from .tokenization_convbert import ConvBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_convbert_fast import ConvBertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_convbert import (
CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
ConvBertForMaskedLM,
ConvBertForMultipleChoice,
ConvBertForQuestionAnswering,
ConvBertForSequenceClassification,
ConvBertForTokenClassification,
ConvBertLayer,
ConvBertModel,
ConvBertPreTrainedModel,
load_tf_weights_in_convbert,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_convbert import (
TF_CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFConvBertForMaskedLM,
TFConvBertForMultipleChoice,
TFConvBertForQuestionAnswering,
TFConvBertForSequenceClassification,
TFConvBertForTokenClassification,
TFConvBertLayer,
TFConvBertModel,
TFConvBertPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
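# With the lazy structure above, importing the package stays cheap: a submodule is
# only really imported when one of its attributes is first accessed. A minimal sketch
# (assumes transformers and torch are installed):
#
#   from transformers.models import convbert
#   config = convbert.ConvBertConfig()        # resolves configuration_convbert lazily
#   model = convbert.ConvBertModel(config)    # resolves the torch-backed module lazily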
| 54 |
import warnings
from ...utils import logging
from .image_processing_chinese_clip import ChineseCLIPImageProcessor
_UpperCAmelCase : Any =logging.get_logger(__name__)
class snake_case__( ChineseCLIPImageProcessor ):
'''simple docstring'''
def __init__( self , *__lowercase , **__lowercase ) -> None:
warnings.warn(
'''The class ChineseCLIPFeatureExtractor is deprecated and will be removed in version 5 of Transformers.'''
            ''' Please use ChineseCLIPImageProcessor instead.''' , FutureWarning , )
super().__init__(*__lowercase , **__lowercase ) | 262 | 0 |
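# Migration sketch for the deprecation shim above (upstream the subclass is named
# ChineseCLIPFeatureExtractor, per its warning text): instantiating it still works but
# emits a FutureWarning, so new code should construct the image processor directly.
#
#   image_processor = ChineseCLIPImageProcessor()   # preferred, no deprecation warning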
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
a__ : List[str] = logging.get_logger(__name__)
a__ : Union[str, Any] = {
'''google/realm-cc-news-pretrained-embedder''': (
'''https://huggingface.co/google/realm-cc-news-pretrained-embedder/resolve/main/config.json'''
),
'''google/realm-cc-news-pretrained-encoder''': (
'''https://huggingface.co/google/realm-cc-news-pretrained-encoder/resolve/main/config.json'''
),
'''google/realm-cc-news-pretrained-scorer''': (
'''https://huggingface.co/google/realm-cc-news-pretrained-scorer/resolve/main/config.json'''
),
'''google/realm-cc-news-pretrained-openqa''': (
        '''https://huggingface.co/google/realm-cc-news-pretrained-openqa/resolve/main/config.json'''
),
'''google/realm-orqa-nq-openqa''': '''https://huggingface.co/google/realm-orqa-nq-openqa/resolve/main/config.json''',
'''google/realm-orqa-nq-reader''': '''https://huggingface.co/google/realm-orqa-nq-reader/resolve/main/config.json''',
'''google/realm-orqa-wq-openqa''': '''https://huggingface.co/google/realm-orqa-wq-openqa/resolve/main/config.json''',
'''google/realm-orqa-wq-reader''': '''https://huggingface.co/google/realm-orqa-wq-reader/resolve/main/config.json''',
# See all REALM models at https://huggingface.co/models?filter=realm
}
class UpperCamelCase_ ( PretrainedConfig):
"""simple docstring"""
snake_case__ : List[Any] = "realm"
def __init__( self : Optional[Any] , UpperCAmelCase__ : int=3_0_5_2_2 , UpperCAmelCase__ : str=7_6_8 , UpperCAmelCase__ : Dict=1_2_8 , UpperCAmelCase__ : Optional[Any]=1_2 , UpperCAmelCase__ : Tuple=1_2 , UpperCAmelCase__ : str=8 , UpperCAmelCase__ : Union[str, Any]=3_0_7_2 , UpperCAmelCase__ : Tuple="gelu_new" , UpperCAmelCase__ : Tuple=0.1 , UpperCAmelCase__ : List[str]=0.1 , UpperCAmelCase__ : List[str]=5_1_2 , UpperCAmelCase__ : Any=2 , UpperCAmelCase__ : List[Any]=0.02 , UpperCAmelCase__ : Union[str, Any]=1E-12 , UpperCAmelCase__ : Union[str, Any]=2_5_6 , UpperCAmelCase__ : Optional[int]=1_0 , UpperCAmelCase__ : Optional[Any]=1E-3 , UpperCAmelCase__ : Any=5 , UpperCAmelCase__ : List[str]=3_2_0 , UpperCAmelCase__ : str=1_3_3_5_3_7_1_8 , UpperCAmelCase__ : Dict=5_0_0_0 , UpperCAmelCase__ : Tuple=1 , UpperCAmelCase__ : Dict=0 , UpperCAmelCase__ : Dict=2 , **UpperCAmelCase__ : Any , ) -> List[str]:
super().__init__(pad_token_id=UpperCAmelCase__ , bos_token_id=UpperCAmelCase__ , eos_token_id=UpperCAmelCase__ , **UpperCAmelCase__ )
# Common config
__SCREAMING_SNAKE_CASE = vocab_size
__SCREAMING_SNAKE_CASE = max_position_embeddings
__SCREAMING_SNAKE_CASE = hidden_size
__SCREAMING_SNAKE_CASE = retriever_proj_size
__SCREAMING_SNAKE_CASE = num_hidden_layers
__SCREAMING_SNAKE_CASE = num_attention_heads
__SCREAMING_SNAKE_CASE = num_candidates
__SCREAMING_SNAKE_CASE = intermediate_size
__SCREAMING_SNAKE_CASE = hidden_act
__SCREAMING_SNAKE_CASE = hidden_dropout_prob
__SCREAMING_SNAKE_CASE = attention_probs_dropout_prob
__SCREAMING_SNAKE_CASE = initializer_range
__SCREAMING_SNAKE_CASE = type_vocab_size
__SCREAMING_SNAKE_CASE = layer_norm_eps
# Reader config
__SCREAMING_SNAKE_CASE = span_hidden_size
__SCREAMING_SNAKE_CASE = max_span_width
__SCREAMING_SNAKE_CASE = reader_layer_norm_eps
__SCREAMING_SNAKE_CASE = reader_beam_size
__SCREAMING_SNAKE_CASE = reader_seq_len
# Retrieval config
__SCREAMING_SNAKE_CASE = num_block_records
__SCREAMING_SNAKE_CASE = searcher_beam_size
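# Usage sketch for the configuration class above (upstream it is RealmConfig): every
# __init__ argument defaults to the values shown, so a bare construction reproduces
# the base hyperparameters, and any field can be overridden by keyword, e.g.
#
#   config = RealmConfig(num_hidden_layers=4, reader_beam_size=2)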
| 195 |
"""simple docstring"""
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    from ...utils.dummy_torch_and_transformers_objects import *  # noqa F403
else:
from .pipeline_kandinsky import KandinskyPipeline
from .pipeline_kandinsky_imgaimg import KandinskyImgaImgPipeline
from .pipeline_kandinsky_inpaint import KandinskyInpaintPipeline
from .pipeline_kandinsky_prior import KandinskyPriorPipeline, KandinskyPriorPipelineOutput
from .text_encoder import MultilingualCLIP
| 195 | 1 |
import unittest
from transformers.models.xlm_prophetnet.tokenization_xlm_prophetnet import SPIECE_UNDERLINE, XLMProphetNetTokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
__snake_case :Optional[Any] = get_tests_dir('''fixtures/test_sentencepiece.model''')
@require_sentencepiece
class _A ( TokenizerTesterMixin , unittest.TestCase ):
UpperCamelCase__ : List[Any] = XLMProphetNetTokenizer
UpperCamelCase__ : List[Any] = False
UpperCamelCase__ : Optional[Any] = True
def _lowerCamelCase ( self : str):
'''simple docstring'''
super().setUp()
# We have a SentencePiece fixture for testing
__a = XLMProphetNetTokenizer(__SCREAMING_SNAKE_CASE , keep_accents=__SCREAMING_SNAKE_CASE)
tokenizer.save_pretrained(self.tmpdirname)
def _lowerCamelCase ( self : List[Any]):
'''simple docstring'''
__a = '''[PAD]'''
__a = 0
self.assertEqual(self.get_tokenizer()._convert_token_to_id(__SCREAMING_SNAKE_CASE) , __SCREAMING_SNAKE_CASE)
self.assertEqual(self.get_tokenizer()._convert_id_to_token(__SCREAMING_SNAKE_CASE) , __SCREAMING_SNAKE_CASE)
def _lowerCamelCase ( self : Optional[Any]):
'''simple docstring'''
__a = list(self.get_tokenizer().get_vocab().keys())
self.assertEqual(vocab_keys[0] , '''[PAD]''')
self.assertEqual(vocab_keys[1] , '''[CLS]''')
self.assertEqual(vocab_keys[-1] , '''j''')
self.assertEqual(len(__SCREAMING_SNAKE_CASE) , 1_012)
def _lowerCamelCase ( self : Union[str, Any]):
'''simple docstring'''
self.assertEqual(self.get_tokenizer().vocab_size , 1_012)
def _lowerCamelCase ( self : List[Any]):
'''simple docstring'''
__a = XLMProphetNetTokenizer(__SCREAMING_SNAKE_CASE , keep_accents=__SCREAMING_SNAKE_CASE)
__a = tokenizer.tokenize('''This is a test''')
self.assertListEqual(__SCREAMING_SNAKE_CASE , ['''▁This''', '''▁is''', '''▁a''', '''▁t''', '''est'''])
self.assertListEqual(
tokenizer.convert_tokens_to_ids(__SCREAMING_SNAKE_CASE) , [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]] , )
__a = tokenizer.tokenize('''I was born in 92000, and this is falsé.''')
self.assertListEqual(
__SCREAMING_SNAKE_CASE , [
SPIECE_UNDERLINE + '''I''',
SPIECE_UNDERLINE + '''was''',
SPIECE_UNDERLINE + '''b''',
'''or''',
'''n''',
SPIECE_UNDERLINE + '''in''',
SPIECE_UNDERLINE + '''''',
'''9''',
'''2''',
'''0''',
'''0''',
'''0''',
''',''',
SPIECE_UNDERLINE + '''and''',
SPIECE_UNDERLINE + '''this''',
SPIECE_UNDERLINE + '''is''',
SPIECE_UNDERLINE + '''f''',
'''al''',
'''s''',
'''é''',
'''.''',
] , )
__a = tokenizer.convert_tokens_to_ids(__SCREAMING_SNAKE_CASE)
self.assertListEqual(
__SCREAMING_SNAKE_CASE , [
value + tokenizer.fairseq_offset
for value in [8, 21, 84, 55, 24, 19, 7, -9, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, -9, 4]
] , )
__a = tokenizer.convert_ids_to_tokens(__SCREAMING_SNAKE_CASE)
self.assertListEqual(
__SCREAMING_SNAKE_CASE , [
SPIECE_UNDERLINE + '''I''',
SPIECE_UNDERLINE + '''was''',
SPIECE_UNDERLINE + '''b''',
'''or''',
'''n''',
SPIECE_UNDERLINE + '''in''',
SPIECE_UNDERLINE + '''''',
'''[UNK]''',
'''2''',
'''0''',
'''0''',
'''0''',
''',''',
SPIECE_UNDERLINE + '''and''',
SPIECE_UNDERLINE + '''this''',
SPIECE_UNDERLINE + '''is''',
SPIECE_UNDERLINE + '''f''',
'''al''',
'''s''',
'''[UNK]''',
'''.''',
] , )
@cached_property
def _lowerCamelCase ( self : Dict):
'''simple docstring'''
return XLMProphetNetTokenizer.from_pretrained('''microsoft/xprophetnet-large-wiki100-cased''')
@slow
def _lowerCamelCase ( self : str):
'''simple docstring'''
__a = '''Hello World!'''
__a = [35_389, 6_672, 49, 2]
self.assertListEqual(__SCREAMING_SNAKE_CASE , self.big_tokenizer.encode(__SCREAMING_SNAKE_CASE))
@slow
def _lowerCamelCase ( self : Tuple):
'''simple docstring'''
__a = {'''input_ids''': [[11_073, 82_783, 18, 26, 82_783, 549, 51_540, 248, 17_209, 1_301, 217, 20, 215_186, 1_325, 147, 17_209, 1_301, 217, 20, 56_370, 53, 122_020, 20, 16_477, 27, 87_355, 4_548, 20, 4_728, 78_392, 17, 159_969, 18, 26, 24_491, 629, 15, 538, 22_704, 5_439, 15, 2_788, 24_491, 9_885, 15, 43_534, 605, 15, 814, 18_403, 33_200, 29, 15, 43_534, 24_458, 12_410, 111, 24_966, 83_669, 9_637, 144_068, 26, 850, 22_346, 27, 147, 24_966, 83_669, 83_490, 26, 39_113, 735, 27, 689, 656, 2_800, 1_339, 4_600, 53, 122_020, 115_785, 34, 816, 1_339, 46_887, 18, 147, 53_905, 1_951, 42_238, 41_170, 17_732, 834, 436, 15, 27_523, 98_733, 217, 147, 5_542, 4_981, 930, 17_347, 16, 2], [20_091, 629, 94, 82_786, 58, 490, 20, 1_528, 84, 53_905, 344, 80_592, 110_128, 18_822, 5_267, 1_306, 62, 152_537, 308, 7_997, 401, 124_427, 549, 35_442, 225, 109, 15_055, 25_748, 147, 7_119, 43_712, 34, 767, 135_366, 18, 16, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [592, 63_784, 119_466, 17, 147_808, 88_214, 18, 656, 81, 32, 3_296, 10_280, 16, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], '''attention_mask''': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=__SCREAMING_SNAKE_CASE , model_name='''microsoft/xprophetnet-large-wiki100-cased''' , revision='''1acad1643ddd54a44df6a1b797ada8373685d90e''' , )
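# These cases are collected by pytest through the shared TokenizerTesterMixin; a
# typical invocation (the test-file path is illustrative) would be:
#
#   pytest tests/models/xlm_prophetnet/test_tokenization_xlm_prophetnet.py -q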
| 49 |
from __future__ import annotations
import random
# Maximum size of the population. Bigger could be faster but is more memory expensive.
N_POPULATION = 200
# Number of elements selected in every generation of evolution. The selection takes
# place from best to worst of that generation and must be smaller than N_POPULATION.
N_SELECTED = 50
# Probability that an element of a generation can mutate, changing one of its genes.
# This will guarantee that all genes will be used during evolution.
MUTATION_PROBABILITY = 0.4
# Just a seed to improve randomness required by the algorithm.
random.seed(random.randint(0, 1000))
def evaluate(item: str, main_target: str) -> tuple[str, float]:
    # Score one candidate by counting characters already in the right position.
    score = len([g for position, g in enumerate(item) if g == main_target[position]])
    return (item, float(score))
def crossover(parent_1: str, parent_2: str) -> tuple[str, str]:
    # Slice both parents at a random point and swap the tails.
    random_slice = random.randint(0, len(parent_1) - 1)
    child_1 = parent_1[:random_slice] + parent_2[random_slice:]
    child_2 = parent_2[:random_slice] + parent_1[random_slice:]
    return (child_1, child_2)
def mutate(child: str, genes: list[str]) -> str:
    # With probability MUTATION_PROBABILITY, replace one random gene of the child.
    child_list = list(child)
    if random.uniform(0, 1) < MUTATION_PROBABILITY:
        child_list[random.randint(0, len(child)) - 1] = random.choice(genes)
    return "".join(child_list)
def select(
    parent_1: tuple[str, float],
    population_score: list[tuple[str, float]],
    genes: list[str],
) -> list[str]:
    pop = []
    # Generate more children proportionally to the fitness score.
    child_n = int(parent_1[1] * 100) + 1
    child_n = 10 if child_n >= 10 else child_n
    for _ in range(child_n):
        parent_2 = population_score[random.randint(0, N_SELECTED)][0]
        child_1, child_2 = crossover(parent_1[0], parent_2)
        # Append new string to the population list.
        pop.append(mutate(child_1, genes))
        pop.append(mutate(child_2, genes))
    return pop
def basic(target: str, genes: list[str], debug: bool = True) -> tuple[int, int, str]:
    # Verify if N_POPULATION is bigger than N_SELECTED
    if N_POPULATION < N_SELECTED:
        msg = f'{N_POPULATION} must be bigger than {N_SELECTED}'
        raise ValueError(msg)
    # Verify that the target contains no genes besides the ones inside genes variable.
    not_in_genes_list = sorted({c for c in target if c not in genes})
    if not_in_genes_list:
        msg = f'{not_in_genes_list} is not in genes list, evolution cannot converge'
        raise ValueError(msg)
    # Generate random starting population.
    population = []
    for _ in range(N_POPULATION):
        population.append("".join([random.choice(genes) for _ in range(len(target))]))
    # Just some logs to know what the algorithm is doing.
    generation, total_population = 0, 0
    # This loop will end when we find a perfect match for our target.
    while True:
        generation += 1
        total_population += len(population)
        # Random population created. Now it's time to evaluate.
        # Adding a bit of concurrency can make everything faster,
        #
        # import concurrent.futures
        # population_score: list[tuple[str, float]] = []
        # with concurrent.futures.ThreadPoolExecutor(
        #         max_workers=NUM_WORKERS) as executor:
        #     futures = {executor.submit(evaluate, item) for item in population}
        #     concurrent.futures.wait(futures)
        #     population_score = [item.result() for item in futures]
        #
        # but with a simple algorithm like this, it will probably be slower.
        # We just need to call evaluate for every item inside the population.
        population_score = [evaluate(item, target) for item in population]
        # Check if there is a matching evolution.
        population_score = sorted(population_score, key=lambda x: x[1], reverse=True)
        if population_score[0][0] == target:
            return (generation, total_population, population_score[0][0])
        # Print the best result every 10 generations,
        # just to know that the algorithm is working.
        if debug and generation % 10 == 0:
            print(
                f'\nGeneration: {generation}'
                f'\nTotal Population: {total_population}'
                f'\nBest score: {population_score[0][1]}'
                f'\nBest string: {population_score[0][0]}' )
        # Flush the old population, keeping some of the best evolutions.
        # Keeping this avoids regression of evolution.
        population_best = population[: int(N_POPULATION / 3)]
        population.clear()
        population.extend(population_best)
        # Normalize population score to be between 0 and 1.
        population_score = [
            (item, score / len(target)) for item, score in population_score
        ]
        # This is selection
        for i in range(N_SELECTED):
            population.extend(select(population_score[int(i)], population_score, genes))
            # Check if the population has already reached the maximum value and if so,
            # break the cycle. If this check is disabled, the algorithm will take
            # forever to compute large strings, but will also calculate small strings
            # in far fewer generations.
            if len(population) > N_POPULATION:
                break
if __name__ == "__main__":
    target_str = (
        "This is a genetic algorithm to evaluate, combine, evolve, and mutate a string!"
    )
    genes_list = list(
        " ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklm"
        "nopqrstuvwxyz.,;!?+-*#@^'èéòà€ù=)(&%$£/\\"
    )
    generation, population, target = basic(target_str, genes_list)
    print(
        f'\nGeneration: {generation}\nTotal Population: {population}\nTarget: {target}'
    )
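# Illustrative sanity checks for the helpers above (not part of the original script):
#
#   evaluate("abcd", "abce")    # -> ("abcd", 3.0): three positions already match
#   crossover("aaaa", "bbbb")   # -> e.g. ("aabb", "bbaa") when the random slice is 2
#   mutate("aaaa", ["a", "b"])  # flips at most one position, with probability 0.4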
| 49 | 1 |
import inspect
import unittest
from huggingface_hub import hf_hub_download
from transformers import ConvNextConfig, UperNetConfig
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import UperNetForSemanticSegmentation
from transformers.models.upernet.modeling_upernet import UPERNET_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class UperNetModelTester :
def __init__( self : int, __A : Optional[int], __A : Union[str, Any]=1_3, __A : Union[str, Any]=3_2, __A : Dict=3, __A : Dict=4, __A : Any=[1_0, 2_0, 3_0, 4_0], __A : Any=[2, 2, 3, 2], __A : Union[str, Any]=True, __A : int=True, __A : Dict=3_7, __A : Union[str, Any]="gelu", __A : int=1_0, __A : Dict=0.0_2, __A : List[str]=["stage2", "stage3", "stage4"], __A : List[Any]=3, __A : List[str]=None, ):
UpperCAmelCase : Any = parent
UpperCAmelCase : Union[str, Any] = batch_size
UpperCAmelCase : int = image_size
UpperCAmelCase : List[Any] = num_channels
UpperCAmelCase : Tuple = num_stages
UpperCAmelCase : Union[str, Any] = hidden_sizes
UpperCAmelCase : int = depths
UpperCAmelCase : Tuple = is_training
UpperCAmelCase : Optional[int] = use_labels
UpperCAmelCase : int = intermediate_size
UpperCAmelCase : Any = hidden_act
UpperCAmelCase : Optional[Any] = type_sequence_label_size
UpperCAmelCase : Optional[int] = initializer_range
UpperCAmelCase : Any = out_features
UpperCAmelCase : List[str] = num_labels
UpperCAmelCase : str = scope
UpperCAmelCase : Union[str, Any] = num_stages
def __magic_name__ ( self : Union[str, Any] ):
UpperCAmelCase : str = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
UpperCAmelCase : List[Any] = None
if self.use_labels:
UpperCAmelCase : str = ids_tensor([self.batch_size], self.type_sequence_label_size )
UpperCAmelCase : Optional[int] = self.get_config()
return config, pixel_values, labels
def __magic_name__ ( self : str ):
return ConvNextConfig(
num_channels=self.num_channels, num_stages=self.num_stages, hidden_sizes=self.hidden_sizes, depths=self.depths, is_training=self.is_training, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, out_features=self.out_features, )
def __magic_name__ ( self : Dict ):
return UperNetConfig(
backbone_config=self.get_backbone_config(), hidden_size=5_1_2, pool_scales=[1, 2, 3, 6], use_auxiliary_head=__A, auxiliary_loss_weight=0.4, auxiliary_in_channels=4_0, auxiliary_channels=2_5_6, auxiliary_num_convs=1, auxiliary_concat_input=__A, loss_ignore_index=2_5_5, num_labels=self.num_labels, )
def __magic_name__ ( self : int, __A : Any, __A : List[str], __A : Union[str, Any] ):
UpperCAmelCase : Optional[int] = UperNetForSemanticSegmentation(config=__A )
model.to(__A )
model.eval()
UpperCAmelCase : Union[str, Any] = model(__A )
self.parent.assertEqual(
result.logits.shape, (self.batch_size, self.num_labels, self.image_size, self.image_size) )
def __magic_name__ ( self : Tuple ):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {'''pixel_values''': pixel_values}
        return config, inputs_dict
@require_torch
class __UpperCAmelCase ( ModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
UpperCamelCase = (UperNetForSemanticSegmentation,) if is_torch_available() else ()
UpperCamelCase = {"""image-segmentation""": UperNetForSemanticSegmentation} if is_torch_available() else {}
UpperCamelCase = False
UpperCamelCase = False
UpperCamelCase = False
UpperCamelCase = False
UpperCamelCase = False
UpperCamelCase = False
def __magic_name__ ( self : Optional[Any] ):
UpperCAmelCase : List[str] = UperNetModelTester(self )
UpperCAmelCase : Tuple = ConfigTester(self, config_class=__A, has_text_modality=__A, hidden_size=3_7 )
def __magic_name__ ( self : Optional[int] ):
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def __magic_name__ ( self : Union[str, Any] ):
return
def __magic_name__ ( self : Optional[int] ):
UpperCAmelCase , UpperCAmelCase : int = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCAmelCase : str = model_class(__A )
UpperCAmelCase : List[str] = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
UpperCAmelCase : Optional[Any] = [*signature.parameters.keys()]
UpperCAmelCase : List[Any] = ['''pixel_values''']
self.assertListEqual(arg_names[:1], __A )
def __magic_name__ ( self : str ):
UpperCAmelCase : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_semantic_segmentation(*__A )
@unittest.skip(reason='''UperNet does not use inputs_embeds''' )
def __magic_name__ ( self : Optional[int] ):
pass
@unittest.skip(reason='''UperNet does not support input and output embeddings''' )
def __magic_name__ ( self : Union[str, Any] ):
pass
@unittest.skip(reason='''UperNet does not have a base model''' )
def __magic_name__ ( self : int ):
pass
@unittest.skip(reason='''UperNet does not have a base model''' )
def __magic_name__ ( self : Union[str, Any] ):
pass
@require_torch_multi_gpu
@unittest.skip(reason='''UperNet has some layers using `add_module` which doesn\'t work well with `nn.DataParallel`''' )
def __magic_name__ ( self : Optional[Any] ):
pass
@unittest.skip('''Will be fixed soon by reducing the size of the model used for common tests.''' )
def __magic_name__ ( self : Optional[int] ):
pass
def __magic_name__ ( self : int ):
def check_hidden_states_output(__A : Optional[int], __A : Union[str, Any], __A : Tuple ):
UpperCAmelCase : Union[str, Any] = model_class(__A )
model.to(__A )
model.eval()
with torch.no_grad():
UpperCAmelCase : str = model(**self._prepare_for_class(__A, __A ) )
UpperCAmelCase : List[str] = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
UpperCAmelCase : Union[str, Any] = self.model_tester.num_stages
self.assertEqual(len(__A ), expected_num_stages + 1 )
# ConvNext's feature maps are of shape (batch_size, num_channels, height, width)
self.assertListEqual(
list(hidden_states[0].shape[-2:] ), [self.model_tester.image_size // 4, self.model_tester.image_size // 4], )
UpperCAmelCase , UpperCAmelCase : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCAmelCase : Optional[Any] = True
check_hidden_states_output(__A, __A, __A )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
UpperCAmelCase : Tuple = True
check_hidden_states_output(__A, __A, __A )
def __magic_name__ ( self : Union[str, Any] ):
UpperCAmelCase , UpperCAmelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
UpperCAmelCase : int = _config_zero_init(__A )
UpperCAmelCase : Optional[Any] = _config_zero_init(configs_no_init.backbone_config )
for model_class in self.all_model_classes:
UpperCAmelCase : Union[str, Any] = model_class(config=__A )
for name, param in model.named_parameters():
if param.requires_grad:
self.assertIn(
((param.data.mean() * 1E9).round() / 1E9).item(), [0.0, 1.0], msg=F'''Parameter {name} of model {model_class} seems not properly initialized''', )
@unittest.skip(reason='''UperNet does not have tied weights''' )
def __magic_name__ ( self : Union[str, Any] ):
pass
@slow
def __magic_name__ ( self : Any ):
for model_name in UPERNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
UpperCAmelCase : List[Any] = UperNetForSemanticSegmentation.from_pretrained(__A )
self.assertIsNotNone(__A )
def a__ ( ) -> Dict:
UpperCAmelCase : Tuple = hf_hub_download(
repo_id='''hf-internal-testing/fixtures_ade20k''' , repo_type='''dataset''' , filename='''ADE_val_00000001.jpg''' )
UpperCAmelCase : str = Image.open(UpperCAmelCase ).convert('''RGB''' )
return image
@require_torch
@require_vision
@slow
class UperNetModelIntegrationTest ( unittest.TestCase ):
def __magic_name__ ( self : Dict ):
UpperCAmelCase : int = AutoImageProcessor.from_pretrained('''openmmlab/upernet-swin-tiny''' )
UpperCAmelCase : Union[str, Any] = UperNetForSemanticSegmentation.from_pretrained('''openmmlab/upernet-swin-tiny''' ).to(__A )
UpperCAmelCase : Optional[Any] = prepare_img()
UpperCAmelCase : Tuple = processor(images=__A, return_tensors='''pt''' ).to(__A )
with torch.no_grad():
UpperCAmelCase : Union[str, Any] = model(**__A )
UpperCAmelCase : Union[str, Any] = torch.Size((1, model.config.num_labels, 5_1_2, 5_1_2) )
self.assertEqual(outputs.logits.shape, __A )
UpperCAmelCase : Union[str, Any] = torch.tensor(
[[-7.5_9_5_8, -7.5_9_5_8, -7.4_3_0_2], [-7.5_9_5_8, -7.5_9_5_8, -7.4_3_0_2], [-7.4_7_9_7, -7.4_7_9_7, -7.3_0_6_8]] ).to(__A )
self.assertTrue(torch.allclose(outputs.logits[0, 0, :3, :3], __A, atol=1E-4 ) )
def __magic_name__ ( self : Union[str, Any] ):
UpperCAmelCase : List[str] = AutoImageProcessor.from_pretrained('''openmmlab/upernet-convnext-tiny''' )
UpperCAmelCase : List[Any] = UperNetForSemanticSegmentation.from_pretrained('''openmmlab/upernet-convnext-tiny''' ).to(__A )
UpperCAmelCase : Any = prepare_img()
UpperCAmelCase : Dict = processor(images=__A, return_tensors='''pt''' ).to(__A )
with torch.no_grad():
UpperCAmelCase : Tuple = model(**__A )
UpperCAmelCase : Dict = torch.Size((1, model.config.num_labels, 5_1_2, 5_1_2) )
self.assertEqual(outputs.logits.shape, __A )
UpperCAmelCase : Dict = torch.tensor(
[[-8.8_1_1_0, -8.8_1_1_0, -8.6_5_2_1], [-8.8_1_1_0, -8.8_1_1_0, -8.6_5_2_1], [-8.7_7_4_6, -8.7_7_4_6, -8.6_1_3_0]] ).to(__A )
self.assertTrue(torch.allclose(outputs.logits[0, 0, :3, :3], __A, atol=1E-4 ) )
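# The integration checks above stop at raw logits of shape (1, num_labels, 512, 512);
# turning them into a per-pixel segmentation map is a single argmax over the class
# dimension (sketch):
#
#   seg_map = outputs.logits.argmax(dim=1)[0]                    # (512, 512) label ids
#   names = [model.config.id2label[int(i)] for i in seg_map.unique()]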
| 99 |
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from tokenizers import processors
from ...tokenization_utils import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_nllb import NllbTokenizer
else:
_lowerCamelCase : List[Any] = None
_lowerCamelCase : str = logging.get_logger(__name__)
_lowerCamelCase : str = {"vocab_file": "sentencepiece.bpe.model", "tokenizer_file": "tokenizer.json"}
_lowerCamelCase : str = {
"vocab_file": {
"facebook/nllb-200-distilled-600M": (
"https://huggingface.co/facebook/nllb-200-distilled-600M/resolve/main/sentencepiece.bpe.model"
),
},
"tokenizer_file": {
"facebook/nllb-200-distilled-600M": (
"https://huggingface.co/facebook/nllb-200-distilled-600M/resolve/main/tokenizer.json"
),
},
}
_lowerCamelCase : Any = {
"facebook/nllb-large-en-ro": 1_0_2_4,
"facebook/nllb-200-distilled-600M": 1_0_2_4,
}
# fmt: off
_lowerCamelCase : int = ["ace_Arab", "ace_Latn", "acm_Arab", "acq_Arab", "aeb_Arab", "afr_Latn", "ajp_Arab", "aka_Latn", "amh_Ethi", "apc_Arab", "arb_Arab", "ars_Arab", "ary_Arab", "arz_Arab", "asm_Beng", "ast_Latn", "awa_Deva", "ayr_Latn", "azb_Arab", "azj_Latn", "bak_Cyrl", "bam_Latn", "ban_Latn", "bel_Cyrl", "bem_Latn", "ben_Beng", "bho_Deva", "bjn_Arab", "bjn_Latn", "bod_Tibt", "bos_Latn", "bug_Latn", "bul_Cyrl", "cat_Latn", "ceb_Latn", "ces_Latn", "cjk_Latn", "ckb_Arab", "crh_Latn", "cym_Latn", "dan_Latn", "deu_Latn", "dik_Latn", "dyu_Latn", "dzo_Tibt", "ell_Grek", "eng_Latn", "epo_Latn", "est_Latn", "eus_Latn", "ewe_Latn", "fao_Latn", "pes_Arab", "fij_Latn", "fin_Latn", "fon_Latn", "fra_Latn", "fur_Latn", "fuv_Latn", "gla_Latn", "gle_Latn", "glg_Latn", "grn_Latn", "guj_Gujr", "hat_Latn", "hau_Latn", "heb_Hebr", "hin_Deva", "hne_Deva", "hrv_Latn", "hun_Latn", "hye_Armn", "ibo_Latn", "ilo_Latn", "ind_Latn", "isl_Latn", "ita_Latn", "jav_Latn", "jpn_Jpan", "kab_Latn", "kac_Latn", "kam_Latn", "kan_Knda", "kas_Arab", "kas_Deva", "kat_Geor", "knc_Arab", "knc_Latn", "kaz_Cyrl", "kbp_Latn", "kea_Latn", "khm_Khmr", "kik_Latn", "kin_Latn", "kir_Cyrl", "kmb_Latn", "kon_Latn", "kor_Hang", "kmr_Latn", "lao_Laoo", "lvs_Latn", "lij_Latn", "lim_Latn", "lin_Latn", "lit_Latn", "lmo_Latn", "ltg_Latn", "ltz_Latn", "lua_Latn", "lug_Latn", "luo_Latn", "lus_Latn", "mag_Deva", "mai_Deva", "mal_Mlym", "mar_Deva", "min_Latn", "mkd_Cyrl", "plt_Latn", "mlt_Latn", "mni_Beng", "khk_Cyrl", "mos_Latn", "mri_Latn", "zsm_Latn", "mya_Mymr", "nld_Latn", "nno_Latn", "nob_Latn", "npi_Deva", "nso_Latn", "nus_Latn", "nya_Latn", "oci_Latn", "gaz_Latn", "ory_Orya", "pag_Latn", "pan_Guru", "pap_Latn", "pol_Latn", "por_Latn", "prs_Arab", "pbt_Arab", "quy_Latn", "ron_Latn", "run_Latn", "rus_Cyrl", "sag_Latn", "san_Deva", "sat_Beng", "scn_Latn", "shn_Mymr", "sin_Sinh", "slk_Latn", "slv_Latn", "smo_Latn", "sna_Latn", "snd_Arab", "som_Latn", "sot_Latn", "spa_Latn", "als_Latn", "srd_Latn", "srp_Cyrl", "ssw_Latn", "sun_Latn", "swe_Latn", "swh_Latn", "szl_Latn", "tam_Taml", "tat_Cyrl", "tel_Telu", "tgk_Cyrl", "tgl_Latn", "tha_Thai", "tir_Ethi", "taq_Latn", "taq_Tfng", "tpi_Latn", "tsn_Latn", "tso_Latn", "tuk_Latn", "tum_Latn", "tur_Latn", "twi_Latn", "tzm_Tfng", "uig_Arab", "ukr_Cyrl", "umb_Latn", "urd_Arab", "uzn_Latn", "vec_Latn", "vie_Latn", "war_Latn", "wol_Latn", "xho_Latn", "ydd_Hebr", "yor_Latn", "yue_Hant", "zho_Hans", "zho_Hant", "zul_Latn"]
class __UpperCAmelCase ( PreTrainedTokenizerFast ):
UpperCamelCase = VOCAB_FILES_NAMES
UpperCamelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
UpperCamelCase = PRETRAINED_VOCAB_FILES_MAP
UpperCamelCase = ["""input_ids""", """attention_mask"""]
UpperCamelCase = NllbTokenizer
UpperCamelCase = []
UpperCamelCase = []
def __init__( self : Optional[Any], __A : Tuple=None, __A : int=None, __A : List[Any]="<s>", __A : Tuple="</s>", __A : Any="</s>", __A : Optional[Any]="<s>", __A : Tuple="<unk>", __A : str="<pad>", __A : Dict="<mask>", __A : Optional[Any]=None, __A : List[Any]=None, __A : List[Any]=None, __A : str=False, **__A : Tuple, ):
        # Mask token behaves like a normal word, i.e. includes the space before it
UpperCAmelCase : int = AddedToken(__A, lstrip=__A, rstrip=__A ) if isinstance(__A, __A ) else mask_token
UpperCAmelCase : str = legacy_behaviour
super().__init__(
vocab_file=__A, tokenizer_file=__A, bos_token=__A, eos_token=__A, sep_token=__A, cls_token=__A, unk_token=__A, pad_token=__A, mask_token=__A, src_lang=__A, tgt_lang=__A, additional_special_tokens=__A, legacy_behaviour=__A, **__A, )
UpperCAmelCase : Optional[int] = vocab_file
UpperCAmelCase : Optional[Any] = False if not self.vocab_file else True
UpperCAmelCase : Optional[int] = FAIRSEQ_LANGUAGE_CODES.copy()
if additional_special_tokens is not None:
# Only add those special tokens if they are not already there.
_additional_special_tokens.extend(
[t for t in additional_special_tokens if t not in _additional_special_tokens] )
self.add_special_tokens({'''additional_special_tokens''': _additional_special_tokens} )
UpperCAmelCase : List[Any] = {
lang_code: self.convert_tokens_to_ids(__A ) for lang_code in FAIRSEQ_LANGUAGE_CODES
}
UpperCAmelCase : List[Any] = src_lang if src_lang is not None else '''eng_Latn'''
UpperCAmelCase : List[Any] = self.convert_tokens_to_ids(self._src_lang )
UpperCAmelCase : List[str] = tgt_lang
self.set_src_lang_special_tokens(self._src_lang )
@property
def __magic_name__ ( self : Optional[int] ):
return self._src_lang
@src_lang.setter
def __magic_name__ ( self : Union[str, Any], __A : str ):
UpperCAmelCase : Tuple = new_src_lang
self.set_src_lang_special_tokens(self._src_lang )
def __magic_name__ ( self : Any, __A : List[int], __A : Optional[List[int]] = None ):
if token_ids_a is None:
return self.prefix_tokens + token_ids_a + self.suffix_tokens
# We don't expect to process pairs, but leave the pair logic for API consistency
return self.prefix_tokens + token_ids_a + token_ids_a + self.suffix_tokens
def __magic_name__ ( self : Tuple, __A : List[int], __A : Optional[List[int]] = None ):
UpperCAmelCase : Union[str, Any] = [self.sep_token_id]
UpperCAmelCase : Any = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def __magic_name__ ( self : str, __A : Optional[int], __A : str, __A : Optional[str], __A : Optional[str], **__A : Union[str, Any] ):
if src_lang is None or tgt_lang is None:
raise ValueError('''Translation requires a `src_lang` and a `tgt_lang` for this model''' )
UpperCAmelCase : Optional[Any] = src_lang
UpperCAmelCase : Optional[Any] = self(__A, add_special_tokens=__A, return_tensors=__A, **__A )
UpperCAmelCase : Union[str, Any] = self.convert_tokens_to_ids(__A )
UpperCAmelCase : int = tgt_lang_id
return inputs
def __magic_name__ ( self : List[str], __A : List[str], __A : str = "eng_Latn", __A : Optional[List[str]] = None, __A : str = "fra_Latn", **__A : Tuple, ):
UpperCAmelCase : Any = src_lang
UpperCAmelCase : Tuple = tgt_lang
return super().prepare_seqaseq_batch(__A, __A, **__A )
def __magic_name__ ( self : List[str] ):
return self.set_src_lang_special_tokens(self.src_lang )
def __magic_name__ ( self : Optional[Any] ):
return self.set_tgt_lang_special_tokens(self.tgt_lang )
def __magic_name__ ( self : Union[str, Any], __A : List[str] ):
UpperCAmelCase : int = self.convert_tokens_to_ids(__A )
if self.legacy_behaviour:
UpperCAmelCase : Tuple = []
UpperCAmelCase : Optional[int] = [self.eos_token_id, self.cur_lang_code]
else:
UpperCAmelCase : Any = [self.cur_lang_code]
UpperCAmelCase : Optional[Any] = [self.eos_token_id]
UpperCAmelCase : Dict = self.convert_ids_to_tokens(self.prefix_tokens )
UpperCAmelCase : Union[str, Any] = self.convert_ids_to_tokens(self.suffix_tokens )
UpperCAmelCase : int = processors.TemplateProcessing(
single=prefix_tokens_str + ['''$A'''] + suffix_tokens_str, pair=prefix_tokens_str + ['''$A''', '''$B'''] + suffix_tokens_str, special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str, self.prefix_tokens + self.suffix_tokens ) ), )
def __magic_name__ ( self : Dict, __A : str ):
UpperCAmelCase : Optional[int] = self.convert_tokens_to_ids(__A )
if self.legacy_behaviour:
UpperCAmelCase : List[Any] = []
UpperCAmelCase : Any = [self.eos_token_id, self.cur_lang_code]
else:
UpperCAmelCase : int = [self.cur_lang_code]
UpperCAmelCase : List[Any] = [self.eos_token_id]
UpperCAmelCase : str = self.convert_ids_to_tokens(self.prefix_tokens )
UpperCAmelCase : List[Any] = self.convert_ids_to_tokens(self.suffix_tokens )
UpperCAmelCase : List[str] = processors.TemplateProcessing(
single=prefix_tokens_str + ['''$A'''] + suffix_tokens_str, pair=prefix_tokens_str + ['''$A''', '''$B'''] + suffix_tokens_str, special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str, self.prefix_tokens + self.suffix_tokens ) ), )
def __magic_name__ ( self : Dict, __A : str, __A : Optional[str] = None ):
if not self.can_save_slow_tokenizer:
raise ValueError(
'''Your fast tokenizer does not have the necessary information to save the vocabulary for a slow '''
'''tokenizer.''' )
if not os.path.isdir(__A ):
logger.error(F'''Vocabulary path ({save_directory}) should be a directory.''' )
return
UpperCAmelCase : Tuple = os.path.join(
__A, (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(__A ):
copyfile(self.vocab_file, __A )
return (out_vocab_file,)
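# Typical usage sketch for the fast NLLB tokenizer defined above (upstream name
# NllbTokenizerFast; checkpoint download assumed):
#
#   tok = NllbTokenizerFast.from_pretrained(
#       "facebook/nllb-200-distilled-600M", src_lang="eng_Latn", tgt_lang="fra_Latn"
#   )
#   batch = tok("Hello world", text_target="Bonjour le monde", return_tensors="pt")
#   # src_lang/tgt_lang control which language-code token wraps source and target ids.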
| 99 | 1 |
"""simple docstring"""
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCAmelCase : Tuple = logging.get_logger(__name__)
lowerCAmelCase : Tuple = {
'microsoft/unispeech-sat-base-100h-libri-ft': (
'https://huggingface.co/microsoft/unispeech-sat-base-100h-libri-ft/resolve/main/config.json'
),
# See all UniSpeechSat models at https://huggingface.co/models?filter=unispeech_sat
}
class __magic_name__ ( PretrainedConfig ):
'''simple docstring'''
__UpperCamelCase = "unispeech-sat"
def __init__( self , _a=32 , _a=768 , _a=12 , _a=12 , _a=3_072 , _a="gelu" , _a=0.1 , _a=0.1 , _a=0.1 , _a=0.0 , _a=0.0 , _a=0.1 , _a=0.1 , _a=0.02 , _a=1e-5 , _a="group" , _a="gelu" , _a=(512, 512, 512, 512, 512, 512, 512) , _a=(5, 2, 2, 2, 2, 2, 2) , _a=(10, 3, 3, 3, 3, 2, 2) , _a=False , _a=128 , _a=16 , _a=False , _a=True , _a=0.05 , _a=10 , _a=2 , _a=0.0 , _a=10 , _a=0 , _a=320 , _a=2 , _a=0.1 , _a=100 , _a=256 , _a=256 , _a=0.1 , _a="mean" , _a=False , _a=False , _a=256 , _a=(512, 512, 512, 512, 1_500) , _a=(5, 3, 3, 1, 1) , _a=(1, 2, 3, 1, 1) , _a=512 , _a=0 , _a=1 , _a=2 , _a=504 , **_a , ):
"""simple docstring"""
super().__init__(**_a , pad_token_id=_a , bos_token_id=_a , eos_token_id=_a )
lowerCamelCase = hidden_size
lowerCamelCase = feat_extract_norm
lowerCamelCase = feat_extract_activation
lowerCamelCase = list(_a )
lowerCamelCase = list(_a )
lowerCamelCase = list(_a )
lowerCamelCase = conv_bias
lowerCamelCase = num_conv_pos_embeddings
lowerCamelCase = num_conv_pos_embedding_groups
lowerCamelCase = len(self.conv_dim )
lowerCamelCase = num_hidden_layers
lowerCamelCase = intermediate_size
lowerCamelCase = hidden_act
lowerCamelCase = num_attention_heads
lowerCamelCase = hidden_dropout
lowerCamelCase = attention_dropout
lowerCamelCase = activation_dropout
lowerCamelCase = feat_proj_dropout
lowerCamelCase = final_dropout
lowerCamelCase = layerdrop
lowerCamelCase = layer_norm_eps
lowerCamelCase = initializer_range
lowerCamelCase = vocab_size
lowerCamelCase = num_clusters
lowerCamelCase = do_stable_layer_norm
lowerCamelCase = use_weighted_layer_sum
if (
(len(self.conv_stride ) != self.num_feat_extract_layers)
or (len(self.conv_kernel ) != self.num_feat_extract_layers)
or (len(self.conv_dim ) != self.num_feat_extract_layers)
):
raise ValueError(
"""Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =="""
""" `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ="""
f' {len(self.conv_dim )}`, `len(config.conv_stride) = {len(self.conv_stride )}`,'
f' `len(config.conv_kernel) = {len(self.conv_kernel )}`.' )
# fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
lowerCamelCase = apply_spec_augment
lowerCamelCase = mask_time_prob
lowerCamelCase = mask_time_length
lowerCamelCase = mask_time_min_masks
lowerCamelCase = mask_feature_prob
lowerCamelCase = mask_feature_length
lowerCamelCase = mask_feature_min_masks
# parameters for pretraining with codevector quantized representations
lowerCamelCase = num_codevectors_per_group
lowerCamelCase = num_codevector_groups
lowerCamelCase = contrastive_logits_temperature
lowerCamelCase = feat_quantizer_dropout
lowerCamelCase = num_negatives
lowerCamelCase = codevector_dim
lowerCamelCase = proj_codevector_dim
lowerCamelCase = diversity_loss_weight
# ctc loss
lowerCamelCase = ctc_loss_reduction
lowerCamelCase = ctc_zero_infinity
# SequenceClassification-specific parameter. Feel free to ignore for other classes.
lowerCamelCase = classifier_proj_size
# XVector-specific parameters. Feel free to ignore for other classes.
lowerCamelCase = list(_a )
lowerCamelCase = list(_a )
lowerCamelCase = list(_a )
lowerCamelCase = xvector_output_dim
@property
def _lowerCAmelCase ( self ):
"""simple docstring"""
return functools.reduce(operator.mul , self.conv_stride , 1 )
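# Sanity check for the property above (upstream it backs inputs_to_logits_ratio): with
# the default conv_stride of (5, 2, 2, 2, 2, 2, 2) it evaluates to 5 * 2**6 == 320,
# i.e. the feature encoder emits one frame per 320 waveform samples (20 ms at 16 kHz).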
| 291 |
import unittest
import numpy as np
import torch
from torch import nn
from transformers import (
CLIPImageProcessor,
CLIPTextConfig,
CLIPTextModelWithProjection,
CLIPTokenizer,
CLIPVisionConfig,
CLIPVisionModelWithProjection,
)
from diffusers import KandinskyVaaPriorPipeline, PriorTransformer, UnCLIPScheduler
from diffusers.utils import torch_device
from diffusers.utils.testing_utils import enable_full_determinism, skip_mps
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class lowerCamelCase__ ( PipelineTesterMixin , unittest.TestCase):
'''simple docstring'''
_A = KandinskyVaaPriorPipeline
_A = ['prompt']
_A = ['prompt', 'negative_prompt']
_A = [
'num_images_per_prompt',
'generator',
'num_inference_steps',
'latents',
'negative_prompt',
'guidance_scale',
'output_type',
'return_dict',
]
_A = False
@property
def _lowerCamelCase ( self :Tuple ) -> Optional[Any]:
return 3_2
@property
def _lowerCamelCase ( self :List[str] ) -> List[str]:
return 3_2
@property
def _lowerCamelCase ( self :Union[str, Any] ) -> Tuple:
return self.time_input_dim
@property
def _lowerCamelCase ( self :Union[str, Any] ) -> Any:
return self.time_input_dim * 4
@property
def _lowerCamelCase ( self :Tuple ) -> str:
return 1_0_0
@property
def _lowerCamelCase ( self :Union[str, Any] ) -> Any:
__UpperCamelCase : int = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" )
return tokenizer
@property
def _lowerCamelCase ( self :str ) -> int:
torch.manual_seed(0 )
__UpperCamelCase : Optional[int] = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=self.text_embedder_hidden_size , projection_dim=self.text_embedder_hidden_size , intermediate_size=3_7 , layer_norm_eps=1E-0_5 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_0_0_0 , )
return CLIPTextModelWithProjection(a )
@property
def _lowerCamelCase ( self :int ) -> Optional[Any]:
torch.manual_seed(0 )
__UpperCamelCase : Optional[int] = {
"num_attention_heads": 2,
"attention_head_dim": 1_2,
"embedding_dim": self.text_embedder_hidden_size,
"num_layers": 1,
}
__UpperCamelCase : List[Any] = PriorTransformer(**a )
        # clip_std and clip_mean are initialized to 0 so PriorTransformer.post_process_latents would always return 0 - set clip_std to 1 so it won't return 0
__UpperCamelCase : Optional[int] = nn.Parameter(torch.ones(model.clip_std.shape ) )
return model
@property
def _lowerCamelCase ( self :Dict ) -> Union[str, Any]:
torch.manual_seed(0 )
__UpperCamelCase : Union[str, Any] = CLIPVisionConfig(
hidden_size=self.text_embedder_hidden_size , image_size=2_2_4 , projection_dim=self.text_embedder_hidden_size , intermediate_size=3_7 , num_attention_heads=4 , num_channels=3 , num_hidden_layers=5 , patch_size=1_4 , )
__UpperCamelCase : int = CLIPVisionModelWithProjection(a )
return model
@property
def _lowerCamelCase ( self :Dict ) -> Optional[Any]:
__UpperCamelCase : List[str] = CLIPImageProcessor(
crop_size=2_2_4 , do_center_crop=a , do_normalize=a , do_resize=a , image_mean=[0.48145466, 0.4578275, 0.40821073] , image_std=[0.26862954, 0.26130258, 0.27577711] , resample=3 , size=2_2_4 , )
return image_processor
def _lowerCamelCase ( self :str ) -> Optional[int]:
__UpperCamelCase : str = self.dummy_prior
__UpperCamelCase : int = self.dummy_image_encoder
__UpperCamelCase : Tuple = self.dummy_text_encoder
__UpperCamelCase : int = self.dummy_tokenizer
__UpperCamelCase : Optional[Any] = self.dummy_image_processor
__UpperCamelCase : int = UnCLIPScheduler(
variance_type="fixed_small_log" , prediction_type="sample" , num_train_timesteps=1_0_0_0 , clip_sample=a , clip_sample_range=10.0 , )
__UpperCamelCase : List[Any] = {
"prior": prior,
"image_encoder": image_encoder,
"text_encoder": text_encoder,
"tokenizer": tokenizer,
"scheduler": scheduler,
"image_processor": image_processor,
}
return components
def _lowerCamelCase ( self :Optional[Any] , a :int , a :Union[str, Any]=0 ) -> Any:
if str(a ).startswith("mps" ):
__UpperCamelCase : int = torch.manual_seed(a )
else:
__UpperCamelCase : List[Any] = torch.Generator(device=a ).manual_seed(a )
__UpperCamelCase : int = {
"prompt": "horse",
"generator": generator,
"guidance_scale": 4.0,
"num_inference_steps": 2,
"output_type": "np",
}
return inputs
def _lowerCamelCase ( self :List[Any] ) -> Dict:
__UpperCamelCase : int = "cpu"
__UpperCamelCase : List[str] = self.get_dummy_components()
__UpperCamelCase : List[str] = self.pipeline_class(**a )
__UpperCamelCase : Tuple = pipe.to(a )
pipe.set_progress_bar_config(disable=a )
__UpperCamelCase : int = pipe(**self.get_dummy_inputs(a ) )
__UpperCamelCase : int = output.image_embeds
__UpperCamelCase : Optional[int] = pipe(
**self.get_dummy_inputs(a ) , return_dict=a , )[0]
__UpperCamelCase : Union[str, Any] = image[0, -1_0:]
__UpperCamelCase : List[str] = image_from_tuple[0, -1_0:]
assert image.shape == (1, 3_2)
__UpperCamelCase : List[Any] = np.array(
[-0.0532, 1.7120, 0.3656, -1.0852, -0.8946, -1.1756, 0.4348, 0.2482, 0.5146, -0.1156] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
@skip_mps
def _lowerCamelCase ( self :int ) -> Union[str, Any]:
__UpperCamelCase : str = torch_device == "cpu"
__UpperCamelCase : List[str] = True
__UpperCamelCase : List[Any] = False
self._test_inference_batch_single_identical(
test_max_difference=a , relax_max_difference=a , test_mean_pixel_difference=a , )
@skip_mps
def _lowerCamelCase ( self :Any ) -> int:
__UpperCamelCase : Optional[Any] = torch_device == "cpu"
__UpperCamelCase : Dict = False
self._test_attention_slicing_forward_pass(
test_max_difference=a , test_mean_pixel_difference=a , ) | 232 | 0 |
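# In normal use the prior's embeddings feed a Kandinsky 2.2 decoder pipeline; a rough
# end-to-end sketch (repo ids and the decoder pipeline are assumed, not shown above):
#
#   out = pipe("horse", num_inference_steps=25)
#   image_embeds, negative_embeds = out.image_embeds, out.negative_image_embeds
#   # image = decoder_pipe(image_embeds=image_embeds,
#   #                      negative_image_embeds=negative_embeds).images[0]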
import argparse
import json
from collections import OrderedDict
import torch
from huggingface_hub import cached_download, hf_hub_url
from transformers import AutoImageProcessor, CvtConfig, CvtForImageClassification
def lowerCamelCase_ ( UpperCamelCase__ : int ) -> Any:
"""simple docstring"""
__lowerCamelCase = []
embed.append(
(
F"""cvt.encoder.stages.{idx}.embedding.convolution_embeddings.projection.weight""",
F"""stage{idx}.patch_embed.proj.weight""",
) )
embed.append(
(
F"""cvt.encoder.stages.{idx}.embedding.convolution_embeddings.projection.bias""",
F"""stage{idx}.patch_embed.proj.bias""",
) )
embed.append(
(
F"""cvt.encoder.stages.{idx}.embedding.convolution_embeddings.normalization.weight""",
F"""stage{idx}.patch_embed.norm.weight""",
) )
embed.append(
(
F"""cvt.encoder.stages.{idx}.embedding.convolution_embeddings.normalization.bias""",
F"""stage{idx}.patch_embed.norm.bias""",
) )
return embed
def lowerCamelCase_ ( UpperCamelCase__ : Any , UpperCamelCase__ : str ) -> int:
"""simple docstring"""
__lowerCamelCase = []
attention_weights.append(
(
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.convolution.weight""",
F"""stage{idx}.blocks.{cnt}.attn.conv_proj_q.conv.weight""",
) )
attention_weights.append(
(
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.weight""",
F"""stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.weight""",
) )
attention_weights.append(
(
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.bias""",
F"""stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.bias""",
) )
attention_weights.append(
(
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.running_mean""",
F"""stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.running_mean""",
) )
attention_weights.append(
(
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.running_var""",
F"""stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.running_var""",
) )
attention_weights.append(
(
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.num_batches_tracked""",
F"""stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.num_batches_tracked""",
) )
attention_weights.append(
(
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.convolution.weight""",
F"""stage{idx}.blocks.{cnt}.attn.conv_proj_k.conv.weight""",
) )
attention_weights.append(
(
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.weight""",
F"""stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.weight""",
) )
attention_weights.append(
(
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.bias""",
F"""stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.bias""",
) )
attention_weights.append(
(
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.running_mean""",
F"""stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.running_mean""",
) )
attention_weights.append(
(
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.running_var""",
F"""stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.running_var""",
) )
attention_weights.append(
(
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.num_batches_tracked""",
F"""stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.num_batches_tracked""",
) )
attention_weights.append(
(
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.convolution.weight""",
F"""stage{idx}.blocks.{cnt}.attn.conv_proj_v.conv.weight""",
) )
attention_weights.append(
(
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.weight""",
F"""stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.weight""",
) )
attention_weights.append(
(
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.bias""",
F"""stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.bias""",
) )
attention_weights.append(
(
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.running_mean""",
F"""stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.running_mean""",
) )
attention_weights.append(
(
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.running_var""",
F"""stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.running_var""",
) )
attention_weights.append(
(
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.num_batches_tracked""",
F"""stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.num_batches_tracked""",
) )
attention_weights.append(
(
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_query.weight""",
F"""stage{idx}.blocks.{cnt}.attn.proj_q.weight""",
) )
attention_weights.append(
(
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_query.bias""",
F"""stage{idx}.blocks.{cnt}.attn.proj_q.bias""",
) )
attention_weights.append(
(
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_key.weight""",
F"""stage{idx}.blocks.{cnt}.attn.proj_k.weight""",
) )
attention_weights.append(
(
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_key.bias""",
F"""stage{idx}.blocks.{cnt}.attn.proj_k.bias""",
) )
attention_weights.append(
(
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_value.weight""",
F"""stage{idx}.blocks.{cnt}.attn.proj_v.weight""",
) )
attention_weights.append(
(
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_value.bias""",
F"""stage{idx}.blocks.{cnt}.attn.proj_v.bias""",
) )
attention_weights.append(
(
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.output.dense.weight""",
F"""stage{idx}.blocks.{cnt}.attn.proj.weight""",
) )
attention_weights.append(
(
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.output.dense.bias""",
F"""stage{idx}.blocks.{cnt}.attn.proj.bias""",
) )
attention_weights.append(
(F"""cvt.encoder.stages.{idx}.layers.{cnt}.intermediate.dense.weight""", F"""stage{idx}.blocks.{cnt}.mlp.fc1.weight""") )
attention_weights.append(
(F"""cvt.encoder.stages.{idx}.layers.{cnt}.intermediate.dense.bias""", F"""stage{idx}.blocks.{cnt}.mlp.fc1.bias""") )
attention_weights.append(
(F"""cvt.encoder.stages.{idx}.layers.{cnt}.output.dense.weight""", F"""stage{idx}.blocks.{cnt}.mlp.fc2.weight""") )
attention_weights.append(
(F"""cvt.encoder.stages.{idx}.layers.{cnt}.output.dense.bias""", F"""stage{idx}.blocks.{cnt}.mlp.fc2.bias""") )
attention_weights.append(
(F"""cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_before.weight""", F"""stage{idx}.blocks.{cnt}.norm1.weight""") )
attention_weights.append(
(F"""cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_before.bias""", F"""stage{idx}.blocks.{cnt}.norm1.bias""") )
attention_weights.append(
(F"""cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_after.weight""", F"""stage{idx}.blocks.{cnt}.norm2.weight""") )
attention_weights.append(
(F"""cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_after.bias""", F"""stage{idx}.blocks.{cnt}.norm2.bias""") )
return attention_weights
def lowerCamelCase_ ( UpperCamelCase__ : Optional[Any] ) -> List[str]:
"""simple docstring"""
__lowerCamelCase = []
token.append((F"""cvt.encoder.stages.{idx}.cls_token""", 'stage2.cls_token') )
return token
def lowerCamelCase_ ( ) -> Any:
"""simple docstring"""
__lowerCamelCase = []
head.append(('layernorm.weight', 'norm.weight') )
head.append(('layernorm.bias', 'norm.bias') )
head.append(('classifier.weight', 'head.weight') )
head.append(('classifier.bias', 'head.bias') )
return head
def lowerCamelCase_ ( UpperCamelCase__ : Dict , UpperCamelCase__ : List[Any] , UpperCamelCase__ : Union[str, Any] , UpperCamelCase__ : List[str] ) -> Optional[int]:
"""simple docstring"""
__lowerCamelCase = 'imagenet-1k-id2label.json'
__lowerCamelCase = 1000
__lowerCamelCase = 'huggingface/label-files'
__lowerCamelCase = num_labels
__lowerCamelCase = json.load(open(cached_download(hf_hub_url(UpperCamelCase__ , UpperCamelCase__ , repo_type='dataset' ) ) , 'r' ) )
__lowerCamelCase = {int(UpperCamelCase__ ): v for k, v in idalabel.items()}
__lowerCamelCase = idalabel
__lowerCamelCase = {v: k for k, v in idalabel.items()}
__lowerCamelCase = CvtConfig(num_labels=UpperCamelCase__ , idalabel=UpperCamelCase__ , labelaid=UpperCamelCase__ )
# For depth size 13 (13 = 1+2+10)
if cvt_model.rsplit('/' , 1 )[-1][4:6] == "13":
__lowerCamelCase = [1, 2, 10]
# For depth size 21 (21 = 1+4+16)
elif cvt_model.rsplit('/' , 1 )[-1][4:6] == "21":
__lowerCamelCase = [1, 4, 16]
# For wide cvt (similar to wide-resnet) depth size 24 (w24 = 2 + 2 + 20)
else:
__lowerCamelCase = [2, 2, 20]
__lowerCamelCase = [3, 12, 16]
__lowerCamelCase = [192, 768, 1024]
__lowerCamelCase = CvtForImageClassification(UpperCamelCase__ )
__lowerCamelCase = AutoImageProcessor.from_pretrained('facebook/convnext-base-224-22k-1k' )
__lowerCamelCase = image_size
__lowerCamelCase = torch.load(UpperCamelCase__ , map_location=torch.device('cpu' ) )
__lowerCamelCase = OrderedDict()
__lowerCamelCase = []
for idx in range(len(config.depth ) ):
if config.cls_token[idx]:
__lowerCamelCase = list_of_state_dict + cls_token(UpperCamelCase__ )
__lowerCamelCase = list_of_state_dict + embeddings(UpperCamelCase__ )
for cnt in range(config.depth[idx] ):
__lowerCamelCase = list_of_state_dict + attention(UpperCamelCase__ , UpperCamelCase__ )
__lowerCamelCase = list_of_state_dict + final()
for gg in list_of_state_dict:
print(gg )
for i in range(len(UpperCamelCase__ ) ):
__lowerCamelCase = original_weights[list_of_state_dict[i][1]]
model.load_state_dict(UpperCamelCase__ )
model.save_pretrained(UpperCamelCase__ )
image_processor.save_pretrained(UpperCamelCase__ )
# Download the weights from zoo: https://1drv.ms/u/s!AhIXJn_J-blW9RzF3rMW7SsLHa8h?e=blQ0Al
if __name__ == "__main__":
__A = argparse.ArgumentParser()
parser.add_argument(
"--cvt_model",
default="cvt-w24",
type=str,
help="Name of the cvt model you'd like to convert.",
)
parser.add_argument(
"--image_size",
default=3_84,
type=int,
help="Input Image Size",
)
parser.add_argument(
"--cvt_file_name",
default=R"cvtmodels\CvT-w24-384x384-IN-22k.pth",
type=str,
help="Input Image Size",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
)
__A = parser.parse_args()
convert_cvt_checkpoint(args.cvt_model, args.image_size, args.cvt_file_name, args.pytorch_dump_folder_path)
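# Minimal sketch (the helper name `apply_renames` is ours, not from this script)
# of how the (hf_key, original_key) pairs built above are consumed: each original
# weight is copied under its HuggingFace key so `model.load_state_dict` accepts it.
def apply_renames(original_weights, rename_pairs):
    new_state_dict = OrderedDict()  # OrderedDict is imported earlier in this script
    for hf_key, orig_key in rename_pairs:
        new_state_dict[hf_key] = original_weights[orig_key]
    return new_state_dict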
| 348 |
from ..utils import DummyObject, requires_backends
class __lowerCAmelCase ( metaclass=__magic_name__ ):
"""simple docstring"""
snake_case_ = ['''sentencepiece''']
def __init__( self , *lowerCamelCase__ , **lowerCamelCase__ ) -> Any:
'''simple docstring'''
requires_backends(self , ['sentencepiece'] )
class __lowerCAmelCase ( metaclass=__magic_name__ ):
"""simple docstring"""
snake_case_ = ['''sentencepiece''']
def __init__( self , *lowerCamelCase__ , **lowerCamelCase__ ) -> int:
'''simple docstring'''
requires_backends(self , ['sentencepiece'] )
class __lowerCAmelCase ( metaclass=__magic_name__ ):
"""simple docstring"""
snake_case_ = ['''sentencepiece''']
def __init__( self , *lowerCamelCase__ , **lowerCamelCase__ ) -> str:
'''simple docstring'''
requires_backends(self , ['sentencepiece'] )
class __lowerCAmelCase ( metaclass=__magic_name__ ):
"""simple docstring"""
snake_case_ = ['''sentencepiece''']
def __init__( self , *lowerCamelCase__ , **lowerCamelCase__ ) -> Union[str, Any]:
'''simple docstring'''
requires_backends(self , ['sentencepiece'] )
class __lowerCAmelCase ( metaclass=__magic_name__ ):
"""simple docstring"""
snake_case_ = ['''sentencepiece''']
def __init__( self , *lowerCamelCase__ , **lowerCamelCase__ ) -> Union[str, Any]:
'''simple docstring'''
requires_backends(self , ['sentencepiece'] )
class __lowerCAmelCase ( metaclass=__magic_name__ ):
"""simple docstring"""
snake_case_ = ['''sentencepiece''']
def __init__( self , *lowerCamelCase__ , **lowerCamelCase__ ) -> Union[str, Any]:
'''simple docstring'''
requires_backends(self , ['sentencepiece'] )
class __lowerCAmelCase ( metaclass=__magic_name__ ):
"""simple docstring"""
snake_case_ = ['''sentencepiece''']
def __init__( self , *lowerCamelCase__ , **lowerCamelCase__ ) -> Optional[Any]:
'''simple docstring'''
requires_backends(self , ['sentencepiece'] )
class __lowerCAmelCase ( metaclass=__magic_name__ ):
"""simple docstring"""
snake_case_ = ['''sentencepiece''']
def __init__( self , *lowerCamelCase__ , **lowerCamelCase__ ) -> List[str]:
'''simple docstring'''
requires_backends(self , ['sentencepiece'] )
class __lowerCAmelCase ( metaclass=__magic_name__ ):
"""simple docstring"""
snake_case_ = ['''sentencepiece''']
def __init__( self , *lowerCamelCase__ , **lowerCamelCase__ ) -> Union[str, Any]:
'''simple docstring'''
requires_backends(self , ['sentencepiece'] )
class __lowerCAmelCase ( metaclass=__magic_name__ ):
"""simple docstring"""
snake_case_ = ['''sentencepiece''']
def __init__( self , *lowerCamelCase__ , **lowerCamelCase__ ) -> Any:
'''simple docstring'''
requires_backends(self , ['sentencepiece'] )
class __lowerCAmelCase ( metaclass=__magic_name__ ):
"""simple docstring"""
snake_case_ = ['''sentencepiece''']
def __init__( self , *lowerCamelCase__ , **lowerCamelCase__ ) -> Any:
'''simple docstring'''
requires_backends(self , ['sentencepiece'] )
class __lowerCAmelCase ( metaclass=__magic_name__ ):
"""simple docstring"""
snake_case_ = ['''sentencepiece''']
def __init__( self , *lowerCamelCase__ , **lowerCamelCase__ ) -> Dict:
'''simple docstring'''
requires_backends(self , ['sentencepiece'] )
class __lowerCAmelCase ( metaclass=__magic_name__ ):
"""simple docstring"""
snake_case_ = ['''sentencepiece''']
def __init__( self , *lowerCamelCase__ , **lowerCamelCase__ ) -> Dict:
'''simple docstring'''
requires_backends(self , ['sentencepiece'] )
class __lowerCAmelCase ( metaclass=__magic_name__ ):
"""simple docstring"""
snake_case_ = ['''sentencepiece''']
def __init__( self , *lowerCamelCase__ , **lowerCamelCase__ ) -> Tuple:
'''simple docstring'''
requires_backends(self , ['sentencepiece'] )
class __lowerCAmelCase ( metaclass=__magic_name__ ):
"""simple docstring"""
snake_case_ = ['''sentencepiece''']
def __init__( self , *lowerCamelCase__ , **lowerCamelCase__ ) -> Dict:
'''simple docstring'''
requires_backends(self , ['sentencepiece'] )
class __lowerCAmelCase ( metaclass=__magic_name__ ):
"""simple docstring"""
snake_case_ = ['''sentencepiece''']
def __init__( self , *lowerCamelCase__ , **lowerCamelCase__ ) -> Optional[Any]:
'''simple docstring'''
requires_backends(self , ['sentencepiece'] )
class __lowerCAmelCase ( metaclass=__magic_name__ ):
"""simple docstring"""
snake_case_ = ['''sentencepiece''']
def __init__( self , *lowerCamelCase__ , **lowerCamelCase__ ) -> Optional[int]:
'''simple docstring'''
requires_backends(self , ['sentencepiece'] )
class __lowerCAmelCase ( metaclass=__magic_name__ ):
"""simple docstring"""
snake_case_ = ['''sentencepiece''']
def __init__( self , *lowerCamelCase__ , **lowerCamelCase__ ) -> str:
'''simple docstring'''
requires_backends(self , ['sentencepiece'] )
class __lowerCAmelCase ( metaclass=__magic_name__ ):
"""simple docstring"""
snake_case_ = ['''sentencepiece''']
def __init__( self , *lowerCamelCase__ , **lowerCamelCase__ ) -> Dict:
'''simple docstring'''
requires_backends(self , ['sentencepiece'] )
class __lowerCAmelCase ( metaclass=__magic_name__ ):
"""simple docstring"""
snake_case_ = ['''sentencepiece''']
def __init__( self , *lowerCamelCase__ , **lowerCamelCase__ ) -> Union[str, Any]:
'''simple docstring'''
requires_backends(self , ['sentencepiece'] )
class __lowerCAmelCase ( metaclass=__magic_name__ ):
"""simple docstring"""
snake_case_ = ['''sentencepiece''']
def __init__( self , *lowerCamelCase__ , **lowerCamelCase__ ) -> Tuple:
'''simple docstring'''
requires_backends(self , ['sentencepiece'] )
class __lowerCAmelCase ( metaclass=__magic_name__ ):
"""simple docstring"""
snake_case_ = ['''sentencepiece''']
def __init__( self , *lowerCamelCase__ , **lowerCamelCase__ ) -> Dict:
'''simple docstring'''
requires_backends(self , ['sentencepiece'] )
class __lowerCAmelCase ( metaclass=__magic_name__ ):
"""simple docstring"""
snake_case_ = ['''sentencepiece''']
def __init__( self , *lowerCamelCase__ , **lowerCamelCase__ ) -> Any:
'''simple docstring'''
requires_backends(self , ['sentencepiece'] )
class __lowerCAmelCase ( metaclass=__magic_name__ ):
"""simple docstring"""
snake_case_ = ['''sentencepiece''']
def __init__( self , *lowerCamelCase__ , **lowerCamelCase__ ) -> Dict:
'''simple docstring'''
requires_backends(self , ['sentencepiece'] )
class __lowerCAmelCase ( metaclass=__magic_name__ ):
"""simple docstring"""
snake_case_ = ['''sentencepiece''']
def __init__( self , *lowerCamelCase__ , **lowerCamelCase__ ) -> List[Any]:
'''simple docstring'''
requires_backends(self , ['sentencepiece'] )
class __lowerCAmelCase ( metaclass=__magic_name__ ):
"""simple docstring"""
snake_case_ = ['''sentencepiece''']
def __init__( self , *lowerCamelCase__ , **lowerCamelCase__ ) -> Optional[int]:
'''simple docstring'''
requires_backends(self , ['sentencepiece'] )
class __lowerCAmelCase ( metaclass=__magic_name__ ):
"""simple docstring"""
snake_case_ = ['''sentencepiece''']
def __init__( self , *lowerCamelCase__ , **lowerCamelCase__ ) -> List[Any]:
'''simple docstring'''
requires_backends(self , ['sentencepiece'] )
class __lowerCAmelCase ( metaclass=__magic_name__ ):
"""simple docstring"""
snake_case_ = ['''sentencepiece''']
def __init__( self , *lowerCamelCase__ , **lowerCamelCase__ ) -> Any:
'''simple docstring'''
requires_backends(self , ['sentencepiece'] )
class __lowerCAmelCase ( metaclass=__magic_name__ ):
"""simple docstring"""
snake_case_ = ['''sentencepiece''']
def __init__( self , *lowerCamelCase__ , **lowerCamelCase__ ) -> str:
'''simple docstring'''
requires_backends(self , ['sentencepiece'] )
class __lowerCAmelCase ( metaclass=__magic_name__ ):
"""simple docstring"""
snake_case_ = ['''sentencepiece''']
def __init__( self , *lowerCamelCase__ , **lowerCamelCase__ ) -> int:
'''simple docstring'''
requires_backends(self , ['sentencepiece'] )
class __lowerCAmelCase ( metaclass=__magic_name__ ):
"""simple docstring"""
snake_case_ = ['''sentencepiece''']
def __init__( self , *lowerCamelCase__ , **lowerCamelCase__ ) -> int:
'''simple docstring'''
requires_backends(self , ['sentencepiece'] )
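# Note on the pattern above: every dummy class shares the `DummyObject` metaclass
# and declares `['sentencepiece']` as its required backend, so merely constructing
# one raises an ImportError telling the user to install sentencepiece.
# Illustrative behavior (the class name is hypothetical):
#
#   tok = SomeSentencePieceTokenizer()  # raises: "... requires the sentencepiece library ..."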
| 348 | 1 |
'''simple docstring'''
import os
from glob import glob
import imageio
import torch
import torchvision
import wandb
from img_processing import custom_to_pil, loop_post_process, preprocess, preprocess_vqgan
from loaders import load_vqgan
from PIL import Image
from torch import nn
from transformers import CLIPModel, CLIPTokenizerFast
from utils import get_device, get_timestamp, show_pil
class SCREAMING_SNAKE_CASE :
"""simple docstring"""
def __init__( self : Any , UpperCamelCase__ : str = "cpu" , UpperCamelCase__ : str = "openai/clip-vit-large-patch14" ):
"""simple docstring"""
UpperCamelCase = device
UpperCamelCase = CLIPTokenizerFast.from_pretrained(UpperCamelCase__ )
UpperCamelCase = [0.4_8_1_4_5_4_6_6, 0.4_5_7_8_2_7_5, 0.4_0_8_2_1_0_7_3]
UpperCamelCase = [0.2_6_8_6_2_9_5_4, 0.2_6_1_3_0_2_5_8, 0.2_7_5_7_7_7_1_1]
UpperCamelCase = torchvision.transforms.Normalize(self.image_mean , self.image_std )
UpperCamelCase = torchvision.transforms.Resize(2_2_4 )
UpperCamelCase = torchvision.transforms.CenterCrop(2_2_4 )
def A ( self : str , UpperCamelCase__ : Dict ):
"""simple docstring"""
UpperCamelCase = self.resize(UpperCamelCase__ )
UpperCamelCase = self.center_crop(UpperCamelCase__ )
UpperCamelCase = self.normalize(UpperCamelCase__ )
return images
def __call__( self : Union[str, Any] , UpperCamelCase__ : int=None , UpperCamelCase__ : Optional[Any]=None , **UpperCamelCase__ : Any ):
"""simple docstring"""
UpperCamelCase = self.tokenizer(text=UpperCamelCase__ , **UpperCamelCase__ )
UpperCamelCase = self.preprocess_img(UpperCamelCase__ )
UpperCamelCase = {key: value.to(self.device ) for (key, value) in encoding.items()}
return encoding
class SCREAMING_SNAKE_CASE ( nn.Module ):
"""simple docstring"""
def __init__( self : Any , UpperCamelCase__ : Optional[int]=1_0 , UpperCamelCase__ : Optional[int]=0.0_1 , UpperCamelCase__ : str=None , UpperCamelCase__ : Optional[Any]=None , UpperCamelCase__ : Any=None , UpperCamelCase__ : int=None , UpperCamelCase__ : Dict=None , UpperCamelCase__ : str=None , UpperCamelCase__ : List[str]=False , UpperCamelCase__ : str=True , UpperCamelCase__ : List[str]="image" , UpperCamelCase__ : Union[str, Any]=True , UpperCamelCase__ : Dict=False , UpperCamelCase__ : List[Any]=False , UpperCamelCase__ : Dict=False , ):
"""simple docstring"""
super().__init__()
UpperCamelCase = None
UpperCamelCase = device if device else get_device()
if vqgan:
UpperCamelCase = vqgan
else:
UpperCamelCase = load_vqgan(self.device , conf_path=UpperCamelCase__ , ckpt_path=UpperCamelCase__ )
self.vqgan.eval()
if clip:
UpperCamelCase = clip
else:
UpperCamelCase = CLIPModel.from_pretrained('openai/clip-vit-base-patch32' )
self.clip.to(self.device )
UpperCamelCase = ProcessorGradientFlow(device=self.device )
UpperCamelCase = iterations
UpperCamelCase = lr
UpperCamelCase = log
UpperCamelCase = make_grid
UpperCamelCase = return_val
UpperCamelCase = quantize
UpperCamelCase = self.vqgan.decoder.z_shape
def A ( self : List[Any] , UpperCamelCase__ : Any=None , UpperCamelCase__ : List[str]=None , UpperCamelCase__ : List[str]=5 , UpperCamelCase__ : List[str]=True ):
"""simple docstring"""
UpperCamelCase = []
if output_path is None:
UpperCamelCase = './animation.gif'
if input_path is None:
UpperCamelCase = self.save_path
UpperCamelCase = sorted(glob(input_path + '/*' ) )
if not len(UpperCamelCase__ ):
raise ValueError(
'No images found in save path, aborting (did you pass save_intermediate=True to the generate'
' function?)' )
if len(UpperCamelCase__ ) == 1:
print('Only one image found in save path (did you pass save_intermediate=True to the generate function?)' )
UpperCamelCase = total_duration / len(UpperCamelCase__ )
UpperCamelCase = [frame_duration] * len(UpperCamelCase__ )
if extend_frames:
UpperCamelCase = 1.5
UpperCamelCase = 3
for file_name in paths:
if file_name.endswith('.png' ):
images.append(imageio.imread(UpperCamelCase__ ) )
imageio.mimsave(UpperCamelCase__ , UpperCamelCase__ , duration=UpperCamelCase__ )
print(f"""gif saved to {output_path}""" )
def A ( self : Optional[Any] , UpperCamelCase__ : List[Any]=None , UpperCamelCase__ : Optional[Any]=None ):
"""simple docstring"""
if not (path or img):
raise ValueError('Input either path or tensor' )
if img is not None:
raise NotImplementedError
UpperCamelCase = preprocess(Image.open(UpperCamelCase__ ) , target_image_size=2_5_6 ).to(self.device )
UpperCamelCase = preprocess_vqgan(UpperCamelCase__ )
UpperCamelCase , *UpperCamelCase = self.vqgan.encode(UpperCamelCase__ )
return z
def A ( self : str , UpperCamelCase__ : List[str] ):
"""simple docstring"""
UpperCamelCase = self.latent.detach().requires_grad_()
UpperCamelCase = base_latent + transform_vector
if self.quantize:
UpperCamelCase , *UpperCamelCase = self.vqgan.quantize(UpperCamelCase__ )
else:
UpperCamelCase = trans_latent
return self.vqgan.decode(UpperCamelCase__ )
def A ( self : Dict , UpperCamelCase__ : Tuple , UpperCamelCase__ : Union[str, Any] , UpperCamelCase__ : Union[str, Any]=None ):
"""simple docstring"""
UpperCamelCase = self.clip_preprocessor(text=UpperCamelCase__ , images=UpperCamelCase__ , return_tensors='pt' , padding=UpperCamelCase__ )
UpperCamelCase = self.clip(**UpperCamelCase__ )
UpperCamelCase = clip_outputs.logits_per_image
if weights is not None:
UpperCamelCase = similarity_logits * weights
return similarity_logits.sum()
def A ( self : Dict , UpperCamelCase__ : Tuple , UpperCamelCase__ : List[str] , UpperCamelCase__ : Union[str, Any] ):
"""simple docstring"""
UpperCamelCase = self._get_clip_similarity(pos_prompts['prompts'] , UpperCamelCase__ , weights=(1 / pos_prompts['weights']) )
if neg_prompts:
UpperCamelCase = self._get_clip_similarity(neg_prompts['prompts'] , UpperCamelCase__ , weights=neg_prompts['weights'] )
else:
UpperCamelCase = torch.tensor([1] , device=self.device )
UpperCamelCase = -torch.log(UpperCamelCase__ ) + torch.log(UpperCamelCase__ )
return loss
def A ( self : Optional[int] , UpperCamelCase__ : List[str] , UpperCamelCase__ : Any , UpperCamelCase__ : List[str] ):
"""simple docstring"""
UpperCamelCase = torch.randn_like(self.latent , requires_grad=UpperCamelCase__ , device=self.device )
UpperCamelCase = torch.optim.Adam([vector] , lr=self.lr )
for i in range(self.iterations ):
optim.zero_grad()
UpperCamelCase = self._add_vector(UpperCamelCase__ )
UpperCamelCase = loop_post_process(UpperCamelCase__ )
UpperCamelCase = self._get_CLIP_loss(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
print('CLIP loss' , UpperCamelCase__ )
if self.log:
wandb.log({'CLIP Loss': clip_loss} )
clip_loss.backward(retain_graph=UpperCamelCase__ )
optim.step()
if self.return_val == "image":
yield custom_to_pil(transformed_img[0] )
else:
yield vector
def A ( self : List[Any] , UpperCamelCase__ : Any , UpperCamelCase__ : int , UpperCamelCase__ : Any ):
"""simple docstring"""
wandb.init(reinit=UpperCamelCase__ , project='face-editor' )
wandb.config.update({'Positive Prompts': positive_prompts} )
wandb.config.update({'Negative Prompts': negative_prompts} )
wandb.config.update({'lr': self.lr, 'iterations': self.iterations} )
if image_path:
UpperCamelCase = Image.open(UpperCamelCase__ )
UpperCamelCase = image.resize((2_5_6, 2_5_6) )
wandb.log({'Original Image': wandb.Image(UpperCamelCase__ )} )
def A ( self : Any , UpperCamelCase__ : List[Any] ):
"""simple docstring"""
if not prompts:
return []
UpperCamelCase = []
UpperCamelCase = []
if isinstance(UpperCamelCase__ , UpperCamelCase__ ):
UpperCamelCase = [prompt.strip() for prompt in prompts.split('|' )]
for prompt in prompts:
if isinstance(UpperCamelCase__ , (tuple, list) ):
UpperCamelCase = prompt[0]
UpperCamelCase = float(prompt[1] )
elif ":" in prompt:
UpperCamelCase , UpperCamelCase = prompt.split(':' )
UpperCamelCase = float(UpperCamelCase__ )
else:
UpperCamelCase = prompt
UpperCamelCase = 1.0
processed_prompts.append(UpperCamelCase__ )
weights.append(UpperCamelCase__ )
return {
"prompts": processed_prompts,
"weights": torch.tensor(UpperCamelCase__ , device=self.device ),
}
def A ( self : Optional[int] , UpperCamelCase__ : Optional[int] , UpperCamelCase__ : str=None , UpperCamelCase__ : str=None , UpperCamelCase__ : List[Any]=True , UpperCamelCase__ : List[str]=False , UpperCamelCase__ : List[Any]=True , UpperCamelCase__ : Optional[Any]=True , UpperCamelCase__ : Optional[Any]=None , ):
"""simple docstring"""
if image_path:
UpperCamelCase = self._get_latent(UpperCamelCase__ )
else:
UpperCamelCase = torch.randn(self.latent_dim , device=self.device )
if self.log:
self._init_logging(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
assert pos_prompts, "You must provide at least one positive prompt."
UpperCamelCase = self.process_prompts(UpperCamelCase__ )
UpperCamelCase = self.process_prompts(UpperCamelCase__ )
if save_final and save_path is None:
UpperCamelCase = os.path.join('./outputs/' , '_'.join(pos_prompts['prompts'] ) )
if not os.path.exists(UpperCamelCase__ ):
os.makedirs(UpperCamelCase__ )
else:
UpperCamelCase = save_path + '_' + get_timestamp()
os.makedirs(UpperCamelCase__ )
UpperCamelCase = save_path
UpperCamelCase = self.vqgan.decode(self.latent )[0]
if show_intermediate:
print('Original Image' )
show_pil(custom_to_pil(UpperCamelCase__ ) )
UpperCamelCase = loop_post_process(UpperCamelCase__ )
for iter, transformed_img in enumerate(self._optimize_CLIP(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) ):
if show_intermediate:
show_pil(UpperCamelCase__ )
if save_intermediate:
transformed_img.save(os.path.join(self.save_path , f"""iter_{iter:03d}.png""" ) )
if self.log:
wandb.log({'Image': wandb.Image(UpperCamelCase__ )} )
if show_final:
show_pil(UpperCamelCase__ )
if save_final:
transformed_img.save(os.path.join(self.save_path , f"""iter_{iter:03d}_final.png""" ) )
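# Hedged usage sketch (the class and method names above are mangled; the prompts
# and paths below are illustrative, not taken from this file):
#
#   editor = <the nn.Module subclass above>(iterations=10, lr=0.01, log=False)
#   for frame in <its generate method>(pos_prompts="a smiling face | sharp focus:0.5",
#                                      neg_prompts="blurry:1.0",
#                                      image_path="input.png",
#                                      save_intermediate=True):
#       pass  # frames are PIL images when return_val == "image"
#   <its gif helper>(output_path="./animation.gif")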
| 28 | import socket
def __SCREAMING_SNAKE_CASE ( ) -> Optional[int]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = socket.socket(socket.AF_INET , socket.SOCK_STREAM )
SCREAMING_SNAKE_CASE__ = socket.gethostname()
SCREAMING_SNAKE_CASE__ = 1_23_12
sock.connect((host, port) )
sock.send(B"""Hello server!""" )
with open("""Received_file""" , """wb""" ) as out_file:
print("""File opened""" )
print("""Receiving data...""" )
while True:
SCREAMING_SNAKE_CASE__ = sock.recv(10_24 )
if not data:
break
out_file.write(__UpperCamelCase )
print("""Successfully received the file""" )
sock.close()
print("""Connection closed""" )
if __name__ == "__main__":
main()
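# A minimal matching server sketch (assumed, not part of the original file): it
# listens on the same port (12312), consumes the client's greeting, then streams
# a file back in 1024-byte chunks, mirroring the receive loop above.
def serve_file(path: str, port: int = 12312) -> None:
    with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as srv:
        srv.bind((socket.gethostname(), port))
        srv.listen(1)
        conn, _addr = srv.accept()
        conn.recv(1024)  # the client's "Hello server!" greeting
        with open(path, "rb") as src:
            chunk = src.read(1024)
            while chunk:
                conn.send(chunk)
                chunk = src.read(1024)
        conn.close()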
| 219 | 0 |
"""simple docstring"""
from sympy import diff, lambdify, symbols
from sympy.functions import * # noqa: F403
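# The loop below implements the modified Newton-Raphson update
#   x_{n+1} = x_n - m * f(x_n) / f'(x_n),
# where the multiplicity m (default 1) keeps convergence quadratic at repeated roots.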
def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : str , _UpperCAmelCase : complex , _UpperCAmelCase : str = "x" , _UpperCAmelCase : float = 10**-10 , _UpperCAmelCase : int = 1 , ):
lowerCAmelCase = symbols(_UpperCAmelCase )
lowerCAmelCase = lambdify(_UpperCAmelCase , _UpperCAmelCase )
lowerCAmelCase = lambdify(_UpperCAmelCase , diff(_UpperCAmelCase , _UpperCAmelCase ) )
lowerCAmelCase = starting_point
while True:
if diff_function(_UpperCAmelCase ) != 0:
lowerCAmelCase = prev_guess - multiplicity * func(_UpperCAmelCase ) / diff_function(
_UpperCAmelCase )
else:
raise ZeroDivisionError('Could not find root' ) from None
# Precision is checked by comparing the difference of consecutive guesses
if abs(next_guess - prev_guess ) < precision:
return next_guess
lowerCAmelCase = next_guess
# Let's Execute
if __name__ == "__main__":
# Find root of trigonometric function
# Find value of pi
print(f'''The root of sin(x) = 0 is {newton_raphson('sin(x)', 2)}''')
# Find root of polynomial
# Find fourth Root of 5
print(f'''The root of x**4 - 5 = 0 is {newton_raphson('x**4 -5', 0.4 +5j)}''')
# Find value of e
print(
'''The root of log(y) - 1 = 0 is ''',
f'''{newton_raphson('log(y) - 1', 2, variable='y')}''',
)
# Exponential Roots
print(
'''The root of exp(x) - 1 = 0 is''',
f'''{newton_raphson('exp(x) - 1', 10, precision=0.0_05)}''',
)
# Find root of cos(x)
print(f'''The root of cos(x) = 0 is {newton_raphson('cos(x)', 0)}''')
| 354 |
"""simple docstring"""
from __future__ import annotations
import unittest
from transformers import EsmConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import numpy
import tensorflow as tf
from transformers.models.esm.modeling_tf_esm import (
TF_ESM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFEsmForMaskedLM,
TFEsmForSequenceClassification,
TFEsmForTokenClassification,
TFEsmModel,
)
class a :
def __init__( self , _snake_case , ):
"""simple docstring"""
lowerCAmelCase = parent
lowerCAmelCase = 13
lowerCAmelCase = 7
lowerCAmelCase = True
lowerCAmelCase = True
lowerCAmelCase = True
lowerCAmelCase = 99
lowerCAmelCase = 32
lowerCAmelCase = 2
lowerCAmelCase = 4
lowerCAmelCase = 37
lowerCAmelCase = 'gelu'
lowerCAmelCase = 0.1
lowerCAmelCase = 0.1
lowerCAmelCase = 5_12
lowerCAmelCase = 16
lowerCAmelCase = 2
lowerCAmelCase = 0.02
lowerCAmelCase = 3
lowerCAmelCase = 4
lowerCAmelCase = None
def UpperCamelCase__ ( self ):
"""simple docstring"""
lowerCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
lowerCAmelCase = None
if self.use_input_mask:
lowerCAmelCase = random_attention_mask([self.batch_size, self.seq_length] )
lowerCAmelCase = None
lowerCAmelCase = None
lowerCAmelCase = None
if self.use_labels:
lowerCAmelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size )
lowerCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
lowerCAmelCase = ids_tensor([self.batch_size] , self.num_choices )
lowerCAmelCase = EsmConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , pad_token_id=1 , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , )
return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
def UpperCamelCase__ ( self ):
"""simple docstring"""
lowerCAmelCase ,lowerCAmelCase ,lowerCAmelCase ,lowerCAmelCase ,lowerCAmelCase ,lowerCAmelCase = self.prepare_config_and_inputs()
lowerCAmelCase = True
lowerCAmelCase = floats_tensor([self.batch_size, self.seq_length, self.hidden_size] )
lowerCAmelCase = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 )
return (
config,
input_ids,
input_mask,
sequence_labels,
token_labels,
choice_labels,
encoder_hidden_states,
encoder_attention_mask,
)
def UpperCamelCase__ ( self , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case ):
"""simple docstring"""
lowerCAmelCase = TFEsmModel(config=_snake_case )
lowerCAmelCase = {'input_ids': input_ids, 'attention_mask': input_mask}
lowerCAmelCase = model(_snake_case )
lowerCAmelCase = [input_ids, input_mask]
lowerCAmelCase = model(_snake_case )
lowerCAmelCase = model(_snake_case )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def UpperCamelCase__ ( self , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case , ):
"""simple docstring"""
lowerCAmelCase = True
lowerCAmelCase = TFEsmModel(config=_snake_case )
lowerCAmelCase = {
'input_ids': input_ids,
'attention_mask': input_mask,
'encoder_hidden_states': encoder_hidden_states,
'encoder_attention_mask': encoder_attention_mask,
}
lowerCAmelCase = model(_snake_case )
lowerCAmelCase = [input_ids, input_mask]
lowerCAmelCase = model(_snake_case , encoder_hidden_states=_snake_case )
# Also check the case where encoder outputs are not passed
lowerCAmelCase = model(_snake_case , attention_mask=_snake_case )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def UpperCamelCase__ ( self , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case ):
"""simple docstring"""
lowerCAmelCase = TFEsmForMaskedLM(config=_snake_case )
lowerCAmelCase = model([input_ids, input_mask] )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def UpperCamelCase__ ( self , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case ):
"""simple docstring"""
lowerCAmelCase = self.num_labels
lowerCAmelCase = TFEsmForTokenClassification(config=_snake_case )
lowerCAmelCase = {'input_ids': input_ids, 'attention_mask': input_mask}
lowerCAmelCase = model(_snake_case )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def UpperCamelCase__ ( self ):
"""simple docstring"""
lowerCAmelCase = self.prepare_config_and_inputs()
lowerCAmelCase ,lowerCAmelCase ,lowerCAmelCase ,lowerCAmelCase ,lowerCAmelCase ,lowerCAmelCase = config_and_inputs
lowerCAmelCase = {'input_ids': input_ids, 'attention_mask': input_mask}
return config, inputs_dict
@require_tf
class a ( a__ , a__ , unittest.TestCase ):
snake_case__ = (
(
TFEsmModel,
TFEsmForMaskedLM,
TFEsmForSequenceClassification,
TFEsmForTokenClassification,
)
if is_tf_available()
else ()
)
snake_case__ = (
{
'''feature-extraction''': TFEsmModel,
'''fill-mask''': TFEsmForMaskedLM,
'''text-classification''': TFEsmForSequenceClassification,
'''token-classification''': TFEsmForTokenClassification,
'''zero-shot''': TFEsmForSequenceClassification,
}
if is_tf_available()
else {}
)
snake_case__ = False
snake_case__ = False
def UpperCamelCase__ ( self ):
"""simple docstring"""
lowerCAmelCase = TFEsmModelTester(self )
lowerCAmelCase = ConfigTester(self , config_class=_snake_case , hidden_size=37 )
def UpperCamelCase__ ( self ):
"""simple docstring"""
self.config_tester.run_common_tests()
def UpperCamelCase__ ( self ):
"""simple docstring"""
lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_snake_case )
def UpperCamelCase__ ( self ):
"""simple docstring"""
lowerCAmelCase = self.model_tester.prepare_config_and_inputs_for_decoder()
self.model_tester.create_and_check_model_as_decoder(*_snake_case )
def UpperCamelCase__ ( self ):
"""simple docstring"""
lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*_snake_case )
def UpperCamelCase__ ( self ):
"""simple docstring"""
lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*_snake_case )
@slow
def UpperCamelCase__ ( self ):
"""simple docstring"""
for model_name in TF_ESM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowerCAmelCase = TFEsmModel.from_pretrained(_snake_case )
self.assertIsNotNone(_snake_case )
@unittest.skip('Protein models do not support embedding resizing.' )
def UpperCamelCase__ ( self ):
"""simple docstring"""
pass
@unittest.skip('Protein models do not support embedding resizing.' )
def UpperCamelCase__ ( self ):
"""simple docstring"""
pass
def UpperCamelCase__ ( self ):
"""simple docstring"""
lowerCAmelCase ,lowerCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCAmelCase = model_class(_snake_case )
assert isinstance(model.get_input_embeddings() , tf.keras.layers.Layer )
if model_class is TFEsmForMaskedLM:
# Output embedding test differs from the main test because they're a matrix, not a layer
lowerCAmelCase = model.get_bias()
assert isinstance(name , dict )
for k, v in name.items():
assert isinstance(v , tf.Variable )
else:
lowerCAmelCase = model.get_output_embeddings()
assert x is None
lowerCAmelCase = model.get_bias()
assert name is None
@require_tf
class a ( unittest.TestCase ):
@slow
def UpperCamelCase__ ( self ):
"""simple docstring"""
lowerCAmelCase = TFEsmForMaskedLM.from_pretrained('facebook/esm2_t6_8M_UR50D' )
lowerCAmelCase = tf.constant([[0, 1, 2, 3, 4, 5]] )
lowerCAmelCase = model(_snake_case )[0]
lowerCAmelCase = [1, 6, 33]
self.assertEqual(list(output.numpy().shape ) , _snake_case )
# compare the actual values for a slice.
lowerCAmelCase = tf.constant(
[
[
[8.921_518, -10.589_814, -6.4_671_307],
[-6.3_967_156, -13.911_377, -1.1_211_915],
[-7.781_247, -13.951_557, -3.740_592],
]
] )
self.assertTrue(numpy.allclose(output[:, :3, :3].numpy() , expected_slice.numpy() , atol=1E-2 ) )
@slow
def UpperCamelCase__ ( self ):
"""simple docstring"""
lowerCAmelCase = TFEsmModel.from_pretrained('facebook/esm2_t6_8M_UR50D' )
lowerCAmelCase = tf.constant([[0, 6, 4, 13, 5, 4, 16, 12, 11, 7, 2]] )
lowerCAmelCase = model(_snake_case )[0]
# compare the actual values for a slice.
lowerCAmelCase = tf.constant(
[
[
[0.14_443_092, 0.54_125_327, 0.3_247_739],
[0.30_340_484, 0.00_526_676, 0.31_077_722],
[0.32_278_043, -0.24_987_096, 0.3_414_628],
]
] )
self.assertTrue(numpy.allclose(output[:, :3, :3].numpy() , expected_slice.numpy() , atol=1E-4 ) )
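# Standalone snippet mirroring the first integration test above (requires TF and
# network access to download the checkpoint; the shape is what the test asserts):
#
#   import tensorflow as tf
#   from transformers import TFEsmForMaskedLM
#   model = TFEsmForMaskedLM.from_pretrained("facebook/esm2_t6_8M_UR50D")
#   logits = model(tf.constant([[0, 1, 2, 3, 4, 5]]))[0]
#   print(logits.shape)  # (1, 6, 33): batch, sequence length, ESM-2 vocab size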
| 309 | 0 |
import unittest
from transformers import SPIECE_UNDERLINE, XLNetTokenizer, XLNetTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
SCREAMING_SNAKE_CASE__ : Union[str, Any] = get_tests_dir('fixtures/test_sentencepiece.model')
@require_sentencepiece
@require_tokenizers
class UpperCamelCase__ (lowerCAmelCase__ , unittest.TestCase ):
'''simple docstring'''
lowerCamelCase_ : Optional[Any] = XLNetTokenizer
lowerCamelCase_ : Union[str, Any] = XLNetTokenizerFast
lowerCamelCase_ : Optional[int] = True
lowerCamelCase_ : Optional[int] = True
def _lowercase ( self ) -> str:
super().setUp()
# We have a SentencePiece fixture for testing
lowerCamelCase : int = XLNetTokenizer(UpperCamelCase__ , keep_accents=UpperCamelCase__ )
tokenizer.sanitize_special_tokens()
tokenizer.save_pretrained(self.tmpdirname )
def _lowercase ( self ) -> str:
lowerCamelCase : str = "<s>"
lowerCamelCase : Any = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(UpperCamelCase__ ) , UpperCamelCase__ )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(UpperCamelCase__ ) , UpperCamelCase__ )
def _lowercase ( self ) -> Any:
lowerCamelCase : Union[str, Any] = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , "<unk>" )
self.assertEqual(vocab_keys[1] , "<s>" )
self.assertEqual(vocab_keys[-1] , "<eod>" )
self.assertEqual(len(UpperCamelCase__ ) , 1006 )
def _lowercase ( self ) -> Optional[int]:
self.assertEqual(self.get_tokenizer().vocab_size , 1000 )
def _lowercase ( self ) -> List[Any]:
lowerCamelCase : int = XLNetTokenizer(UpperCamelCase__ , keep_accents=UpperCamelCase__ )
lowerCamelCase : str = tokenizer.tokenize("This is a test" )
self.assertListEqual(UpperCamelCase__ , ["▁This", "▁is", "▁a", "▁t", "est"] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(UpperCamelCase__ ) , [285, 46, 10, 170, 382] )
lowerCamelCase : int = tokenizer.tokenize("I was born in 92000, and this is falsé." )
self.assertListEqual(
UpperCamelCase__ , [
SPIECE_UNDERLINE + "I",
SPIECE_UNDERLINE + "was",
SPIECE_UNDERLINE + "b",
"or",
"n",
SPIECE_UNDERLINE + "in",
SPIECE_UNDERLINE + "",
"9",
"2",
"0",
"0",
"0",
",",
SPIECE_UNDERLINE + "and",
SPIECE_UNDERLINE + "this",
SPIECE_UNDERLINE + "is",
SPIECE_UNDERLINE + "f",
"al",
"s",
"é",
".",
] , )
lowerCamelCase : int = tokenizer.convert_tokens_to_ids(UpperCamelCase__ )
self.assertListEqual(UpperCamelCase__ , [8, 21, 84, 55, 24, 19, 7, 0, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 0, 4] )
lowerCamelCase : Dict = tokenizer.convert_ids_to_tokens(UpperCamelCase__ )
self.assertListEqual(
UpperCamelCase__ , [
SPIECE_UNDERLINE + "I",
SPIECE_UNDERLINE + "was",
SPIECE_UNDERLINE + "b",
"or",
"n",
SPIECE_UNDERLINE + "in",
SPIECE_UNDERLINE + "",
"<unk>",
"2",
"0",
"0",
"0",
",",
SPIECE_UNDERLINE + "and",
SPIECE_UNDERLINE + "this",
SPIECE_UNDERLINE + "is",
SPIECE_UNDERLINE + "f",
"al",
"s",
"<unk>",
".",
] , )
def _lowercase ( self ) -> Any:
lowerCamelCase : Union[str, Any] = XLNetTokenizer(UpperCamelCase__ , do_lower_case=UpperCamelCase__ )
lowerCamelCase : Dict = tokenizer.tokenize("I was born in 92000, and this is falsé." )
self.assertListEqual(
UpperCamelCase__ , [
SPIECE_UNDERLINE + "",
"i",
SPIECE_UNDERLINE + "was",
SPIECE_UNDERLINE + "b",
"or",
"n",
SPIECE_UNDERLINE + "in",
SPIECE_UNDERLINE + "",
"9",
"2",
"0",
"0",
"0",
",",
SPIECE_UNDERLINE + "and",
SPIECE_UNDERLINE + "this",
SPIECE_UNDERLINE + "is",
SPIECE_UNDERLINE + "f",
"al",
"se",
".",
] , )
self.assertListEqual(tokenizer.tokenize("H\u00E9llo" ) , ["▁he", "ll", "o"] )
def _lowercase ( self ) -> int:
lowerCamelCase : str = XLNetTokenizer(UpperCamelCase__ , do_lower_case=UpperCamelCase__ )
lowerCamelCase : Optional[int] = tokenizer.tokenize("I was born in 92000, and this is falsé." )
self.assertListEqual(
UpperCamelCase__ , [
SPIECE_UNDERLINE + "I",
SPIECE_UNDERLINE + "was",
SPIECE_UNDERLINE + "b",
"or",
"n",
SPIECE_UNDERLINE + "in",
SPIECE_UNDERLINE + "",
"9",
"2",
"0",
"0",
"0",
",",
SPIECE_UNDERLINE + "and",
SPIECE_UNDERLINE + "this",
SPIECE_UNDERLINE + "is",
SPIECE_UNDERLINE + "f",
"al",
"se",
".",
] , )
@slow
def _lowercase ( self ) -> Tuple:
lowerCamelCase : List[Any] = XLNetTokenizer.from_pretrained("xlnet-base-cased" )
lowerCamelCase : List[str] = tokenizer.encode("sequence builders" , add_special_tokens=UpperCamelCase__ )
lowerCamelCase : Dict = tokenizer.encode("multi-sequence build" , add_special_tokens=UpperCamelCase__ )
lowerCamelCase : str = tokenizer.build_inputs_with_special_tokens(UpperCamelCase__ )
lowerCamelCase : List[str] = tokenizer.build_inputs_with_special_tokens(UpperCamelCase__ , UpperCamelCase__ )
assert encoded_sentence == text + [4, 3]
assert encoded_pair == text + [4] + text_a + [4, 3]
@slow
def _lowercase ( self ) -> Union[str, Any]:
# fmt: off
lowerCamelCase : str = {"input_ids": [[17, 2_1442, 270, 17, 10, 1_4645, 318, 34, 17, 4546, 3145, 787, 13, 7752, 2_2018, 23, 21, 17, 4546, 3145, 787, 13, 3352, 1_4431, 13, 5500, 11, 1176, 580, 13, 1_6819, 4797, 23, 17, 10, 1_7135, 658, 19, 457, 7932, 13, 184, 19, 3154, 1_7135, 6468, 19, 1404, 1_2269, 19, 4229, 5356, 1_6264, 46, 19, 17, 2_0545, 1_0395, 9, 9, 9, 11, 28, 6421, 9531, 2_0729, 17, 10, 353, 1_7022, 11, 21, 6421, 9531, 1_6949, 17, 10, 1_1509, 753, 11, 33, 95, 2421, 7385, 956, 1_4431, 2626, 25, 842, 7385, 4836, 21, 1429, 2272, 9855, 3120, 161, 2_4738, 19, 1_3203, 658, 218, 787, 21, 430, 1_8482, 847, 2637, 9, 4, 3], [5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 322, 2_2178, 27, 1064, 22, 956, 13, 1_1101, 1429, 5854, 2_4313, 1_8953, 40, 422, 2_4366, 68, 1758, 37, 1_0483, 1_4257, 31, 207, 263, 21, 203, 3773, 25, 71, 9735, 9, 4, 3], [5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 32, 2049, 3442, 17, 1_3894, 3380, 23, 95, 18, 1_7634, 2288, 9, 4, 3]], "token_type_ids": [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2], [3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2], [3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2]], "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=UpperCamelCase__ , model_name="xlnet-base-cased" , revision="c841166438c31ec7ca9a106dee7bb312b73ae511" , )
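# Quick illustration of the sentencepiece behavior the tests above assert: word
# starts are marked with "▁" (SPIECE_UNDERLINE). This needs network access, and
# the exact pieces depend on the pretrained vocabulary, so treat it as indicative:
#
#   from transformers import XLNetTokenizer
#   tok = XLNetTokenizer.from_pretrained("xlnet-base-cased")
#   print(tok.tokenize("This is a test"))  # e.g. ['▁This', '▁is', '▁a', '▁test']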
| 48 |
'''simple docstring'''
class lowercase :
"""simple docstring"""
def __init__( self ) -> List[str]:
_UpperCAmelCase : int = 0
_UpperCAmelCase : Union[str, Any] = 0
_UpperCAmelCase : Optional[int] = {}
def _snake_case ( self ,a_ ) -> Optional[Any]:
if vertex not in self.adjacency:
_UpperCAmelCase : int = {}
self.num_vertices += 1
def _snake_case ( self ,a_ ,a_ ,a_ ) -> int:
self.add_vertex(a_ )
self.add_vertex(a_ )
if head == tail:
return
_UpperCAmelCase : List[Any] = weight
_UpperCAmelCase : Dict = weight
def _snake_case ( self ) -> Dict:
_UpperCAmelCase : Optional[int] = self.get_edges()
for edge in edges:
_UpperCAmelCase ,_UpperCAmelCase ,_UpperCAmelCase : Dict = edge
edges.remove((tail, head, weight) )
for i in range(len(a_ ) ):
_UpperCAmelCase : str = list(edges[i] )
edges.sort(key=lambda a_ : a_[2] )
for i in range(len(a_ ) - 1 ):
if edges[i][2] >= edges[i + 1][2]:
_UpperCAmelCase : Optional[Any] = edges[i][2] + 1
for edge in edges:
_UpperCAmelCase ,_UpperCAmelCase ,_UpperCAmelCase : Union[str, Any] = edge
_UpperCAmelCase : str = weight
_UpperCAmelCase : List[str] = weight
def __str__( self ) -> Any:
_UpperCAmelCase : List[Any] = """"""
for tail in self.adjacency:
for head in self.adjacency[tail]:
_UpperCAmelCase : List[str] = self.adjacency[head][tail]
string += f'''{head} -> {tail} == {weight}\n'''
return string.rstrip("""\n""" )
def _snake_case ( self ) -> str:
_UpperCAmelCase : int = []
for tail in self.adjacency:
for head in self.adjacency[tail]:
output.append((tail, head, self.adjacency[head][tail]) )
return output
def _snake_case ( self ) -> Optional[int]:
return self.adjacency.keys()
@staticmethod
def _snake_case ( a_=None ,a_=None ) -> Tuple:
_UpperCAmelCase : List[Any] = Graph()
if vertices is None:
_UpperCAmelCase : List[str] = []
if edges is None:
_UpperCAmelCase : Optional[Any] = []
for vertex in vertices:
g.add_vertex(a_ )
for edge in edges:
g.add_edge(*a_ )
return g
class lowercase :
"""simple docstring"""
def __init__( self ) -> int:
_UpperCAmelCase : List[str] = {}
_UpperCAmelCase : int = {}
def __len__( self ) -> Tuple:
return len(self.parent )
def _snake_case ( self ,a_ ) -> str:
if item in self.parent:
return self.find(a_ )
_UpperCAmelCase : Optional[Any] = item
_UpperCAmelCase : List[Any] = 0
return item
def _snake_case ( self ,a_ ) -> List[str]:
if item not in self.parent:
return self.make_set(a_ )
if item != self.parent[item]:
_UpperCAmelCase : List[Any] = self.find(self.parent[item] )
return self.parent[item]
def _snake_case ( self ,a_ ,a_ ) -> Union[str, Any]:
_UpperCAmelCase : Any = self.find(a_ )
_UpperCAmelCase : List[str] = self.find(a_ )
if roota == roota:
return roota
if self.rank[roota] > self.rank[roota]:
_UpperCAmelCase : Any = roota
return roota
if self.rank[roota] < self.rank[roota]:
_UpperCAmelCase : Any = roota
return roota
if self.rank[roota] == self.rank[roota]:
self.rank[roota] += 1
_UpperCAmelCase : List[str] = roota
return roota
return None
@staticmethod
def _snake_case ( a_ ) -> List[Any]:
_UpperCAmelCase : int = graph.num_vertices
_UpperCAmelCase : int = Graph.UnionFind()
_UpperCAmelCase : Optional[int] = []
while num_components > 1:
_UpperCAmelCase : int = {}
for vertex in graph.get_vertices():
_UpperCAmelCase : Union[str, Any] = -1
_UpperCAmelCase : Tuple = graph.get_edges()
for edge in edges:
_UpperCAmelCase ,_UpperCAmelCase ,_UpperCAmelCase : str = edge
edges.remove((tail, head, weight) )
for edge in edges:
_UpperCAmelCase ,_UpperCAmelCase ,_UpperCAmelCase : Optional[Any] = edge
_UpperCAmelCase : Any = union_find.find(a_ )
_UpperCAmelCase : Any = union_find.find(a_ )
if seta != seta:
if cheap_edge[seta] == -1 or cheap_edge[seta][2] > weight:
_UpperCAmelCase : Tuple = [head, tail, weight]
if cheap_edge[seta] == -1 or cheap_edge[seta][2] > weight:
_UpperCAmelCase : List[str] = [head, tail, weight]
for vertex in cheap_edge:
if cheap_edge[vertex] != -1:
_UpperCAmelCase ,_UpperCAmelCase ,_UpperCAmelCase : str = cheap_edge[vertex]
if union_find.find(a_ ) != union_find.find(a_ ):
union_find.union(a_ ,a_ )
mst_edges.append(cheap_edge[vertex] )
_UpperCAmelCase : Tuple = num_components - 1
_UpperCAmelCase : Optional[int] = Graph.build(edges=a_ )
return mst
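# Usage sketch (the MST routine above is a static method whose name was mangled;
# in the upstream source it is Graph.boruvka and UnionFind is nested inside Graph):
#
#   g = Graph.build(vertices=[0, 1, 2], edges=[(0, 1, 1), (0, 2, 2), (1, 2, 3)])
#   mst = Graph.boruvka(g)  # assumed original name
#   print(mst)              # prints "head -> tail == weight" lines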
| 215 | 0 |
from . import __version__
# Backward compatibility imports, to make sure all those objects can be found in file_utils
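# e.g. legacy code such as `from transformers.file_utils import cached_property`
# keeps working because this module simply re-exports everything from `.utils`.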
from .utils import (
CLOUDFRONT_DISTRIB_PREFIX,
CONFIG_NAME,
DISABLE_TELEMETRY,
DUMMY_INPUTS,
DUMMY_MASK,
ENV_VARS_TRUE_AND_AUTO_VALUES,
ENV_VARS_TRUE_VALUES,
FEATURE_EXTRACTOR_NAME,
FLAX_WEIGHTS_NAME,
HF_MODULES_CACHE,
HUGGINGFACE_CO_PREFIX,
HUGGINGFACE_CO_RESOLVE_ENDPOINT,
MODEL_CARD_NAME,
MULTIPLE_CHOICE_DUMMY_INPUTS,
PYTORCH_PRETRAINED_BERT_CACHE,
PYTORCH_TRANSFORMERS_CACHE,
S3_BUCKET_PREFIX,
SENTENCEPIECE_UNDERLINE,
SPIECE_UNDERLINE,
TF2_WEIGHTS_NAME,
TF_WEIGHTS_NAME,
TORCH_FX_REQUIRED_VERSION,
TRANSFORMERS_CACHE,
TRANSFORMERS_DYNAMIC_MODULE_NAME,
USE_JAX,
USE_TF,
USE_TORCH,
WEIGHTS_INDEX_NAME,
WEIGHTS_NAME,
ContextManagers,
DummyObject,
EntryNotFoundError,
ExplicitEnum,
ModelOutput,
PaddingStrategy,
PushToHubMixin,
RepositoryNotFoundError,
RevisionNotFoundError,
TensorType,
_LazyModule,
add_code_sample_docstrings,
add_end_docstrings,
add_start_docstrings,
add_start_docstrings_to_model_forward,
cached_property,
copy_func,
default_cache_path,
define_sagemaker_information,
get_cached_models,
get_file_from_repo,
get_full_repo_name,
get_torch_version,
has_file,
http_user_agent,
is_apex_available,
is_bsa_available,
is_coloredlogs_available,
is_datasets_available,
is_detectrona_available,
is_faiss_available,
is_flax_available,
is_ftfy_available,
is_in_notebook,
is_ipex_available,
is_librosa_available,
is_offline_mode,
is_onnx_available,
is_pandas_available,
is_phonemizer_available,
is_protobuf_available,
is_psutil_available,
is_pyanvml_available,
is_pyctcdecode_available,
is_pytesseract_available,
is_pytorch_quantization_available,
is_rjieba_available,
is_sagemaker_dp_enabled,
is_sagemaker_mp_enabled,
is_scipy_available,
is_sentencepiece_available,
is_seqio_available,
is_sklearn_available,
is_soundfile_availble,
is_spacy_available,
is_speech_available,
is_tensor,
is_tensorflow_probability_available,
is_tfaonnx_available,
is_tf_available,
is_timm_available,
is_tokenizers_available,
is_torch_available,
is_torch_bfaa_available,
is_torch_cuda_available,
is_torch_fx_available,
is_torch_fx_proxy,
is_torch_mps_available,
is_torch_tfaa_available,
is_torch_tpu_available,
is_torchaudio_available,
is_training_run_on_sagemaker,
is_vision_available,
replace_return_docstrings,
requires_backends,
to_numpy,
to_py_obj,
torch_only_method,
) | 354 |
def __lowerCAmelCase (SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )-> int:
"""simple docstring"""
return x if y == 0 else greatest_common_divisor(SCREAMING_SNAKE_CASE , x % y )
def __lowerCAmelCase (SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )-> int:
"""simple docstring"""
return (x * y) // greatest_common_divisor(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
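# Since lcm(x, y) * gcd(x, y) == x * y, folding lcm over 1..n (below) yields the
# smallest number evenly divisible by every integer up to n; e.g. for n = 10 the
# answer is 2520 (Project Euler problem 5).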
def __lowerCAmelCase (SCREAMING_SNAKE_CASE = 20 )-> int:
"""simple docstring"""
snake_case_ = 1
for i in range(1 , n + 1 ):
snake_case_ = lcm(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
return g
if __name__ == "__main__":
print(f'''{solution() = }''') | 267 | 0 |
'''simple docstring'''
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
from ..models.auto import AutoModelForVisionaSeq
from ..utils import requires_backends
from .base import PipelineTool
if TYPE_CHECKING:
from PIL import Image
class a__( lowerCAmelCase__ ):
'''simple docstring'''
UpperCAmelCase_ : Dict = '''Salesforce/blip-image-captioning-base'''
UpperCAmelCase_ : List[str] = (
'''This is a tool that generates a description of an image. It takes an input named `image` which should be the '''
'''image to caption, and returns a text that contains the description in English.'''
)
UpperCAmelCase_ : Optional[int] = '''image_captioner'''
UpperCAmelCase_ : List[Any] = AutoModelForVisionaSeq
UpperCAmelCase_ : Optional[Any] = ['''image''']
UpperCAmelCase_ : Union[str, Any] = ['''text''']
def __init__( self , *__lowerCAmelCase , **__lowerCAmelCase):
"""simple docstring"""
requires_backends(self , ["""vision"""])
super().__init__(*__lowerCAmelCase , **__lowerCAmelCase)
def a_ ( self , __lowerCAmelCase):
"""simple docstring"""
return self.pre_processor(images=__lowerCAmelCase , return_tensors="""pt""")
def a_ ( self , __lowerCAmelCase):
"""simple docstring"""
return self.model.generate(**__lowerCAmelCase)
def a_ ( self , __lowerCAmelCase):
"""simple docstring"""
return self.pre_processor.batch_decode(__lowerCAmelCase , skip_special_tokens=__lowerCAmelCase)[0].strip()
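# Hedged usage sketch (requires the vision extras and a checkpoint download; the
# file name is illustrative):
#
#   from PIL import Image
#   captioner = <the PipelineTool subclass above>()
#   print(captioner(Image.open("photo.jpg")))  # -> an English caption string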
| 272 | '''simple docstring'''
from typing import Optional, Tuple
import jax
import jax.numpy as jnp
from flax import linen as nn
from flax.core.frozen_dict import FrozenDict
from transformers import CLIPConfig, FlaxPreTrainedModel
from transformers.models.clip.modeling_flax_clip import FlaxCLIPVisionModule
def snake_case__ ( _A: Union[str, Any] , _A: Tuple , _A: Any=1e-12 ) -> str:
'''simple docstring'''
lowerCAmelCase = jnp.divide(emb_a.T , jnp.clip(jnp.linalg.norm(_A , axis=1 ) , a_min=_A ) ).T
lowerCAmelCase = jnp.divide(emb_a.T , jnp.clip(jnp.linalg.norm(_A , axis=1 ) , a_min=_A ) ).T
return jnp.matmul(_A , norm_emb_a.T )
class a__( nn.Module ):
'''simple docstring'''
UpperCAmelCase_ : CLIPConfig
UpperCAmelCase_ : jnp.dtype = jnp.floataa
def a_ ( self):
"""simple docstring"""
lowerCAmelCase = FlaxCLIPVisionModule(self.config.vision_config)
lowerCAmelCase = nn.Dense(self.config.projection_dim , use_bias=__lowerCAmelCase , dtype=self.dtype)
lowerCAmelCase = self.param("""concept_embeds""" , jax.nn.initializers.ones , (17, self.config.projection_dim))
lowerCAmelCase = self.param(
"""special_care_embeds""" , jax.nn.initializers.ones , (3, self.config.projection_dim))
lowerCAmelCase = self.param("""concept_embeds_weights""" , jax.nn.initializers.ones , (17,))
lowerCAmelCase = self.param("""special_care_embeds_weights""" , jax.nn.initializers.ones , (3,))
def __call__( self , __lowerCAmelCase):
"""simple docstring"""
lowerCAmelCase = self.vision_model(__lowerCAmelCase)[1]
lowerCAmelCase = self.visual_projection(__lowerCAmelCase)
lowerCAmelCase = jax_cosine_distance(__lowerCAmelCase , self.special_care_embeds)
lowerCAmelCase = jax_cosine_distance(__lowerCAmelCase , self.concept_embeds)
# increase this value to create a stronger `nfsw` filter
# at the cost of increasing the possibility of filtering benign image inputs
lowerCAmelCase = 0.0
lowerCAmelCase = special_cos_dist - self.special_care_embeds_weights[None, :] + adjustment
lowerCAmelCase = jnp.round(__lowerCAmelCase , 3)
lowerCAmelCase = jnp.any(special_scores > 0 , axis=1 , keepdims=__lowerCAmelCase)
# Use a lower threshold if an image has any special care concept
lowerCAmelCase = is_special_care * 0.01
lowerCAmelCase = cos_dist - self.concept_embeds_weights[None, :] + special_adjustment
lowerCAmelCase = jnp.round(__lowerCAmelCase , 3)
lowerCAmelCase = jnp.any(concept_scores > 0 , axis=1)
return has_nsfw_concepts
class a__( lowerCAmelCase__ ):
'''simple docstring'''
UpperCAmelCase_ : int = CLIPConfig
UpperCAmelCase_ : Any = '''clip_input'''
UpperCAmelCase_ : List[str] = FlaxStableDiffusionSafetyCheckerModule
def __init__( self , __lowerCAmelCase , __lowerCAmelCase = None , __lowerCAmelCase = 0 , __lowerCAmelCase = jnp.floataa , __lowerCAmelCase = True , **__lowerCAmelCase , ):
"""simple docstring"""
if input_shape is None:
lowerCAmelCase = (1, 224, 224, 3)
lowerCAmelCase = self.module_class(config=__lowerCAmelCase , dtype=__lowerCAmelCase , **__lowerCAmelCase)
super().__init__(__lowerCAmelCase , __lowerCAmelCase , input_shape=__lowerCAmelCase , seed=__lowerCAmelCase , dtype=__lowerCAmelCase , _do_init=_do_init)
def a_ ( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = None):
"""simple docstring"""
lowerCAmelCase = jax.random.normal(__lowerCAmelCase , __lowerCAmelCase)
lowerCAmelCase , lowerCAmelCase = jax.random.split(__lowerCAmelCase)
lowerCAmelCase = {"""params""": params_rng, """dropout""": dropout_rng}
lowerCAmelCase = self.module.init(__lowerCAmelCase , __lowerCAmelCase)["""params"""]
return random_params
def __call__( self , __lowerCAmelCase , __lowerCAmelCase = None , ):
"""simple docstring"""
lowerCAmelCase = jnp.transpose(__lowerCAmelCase , (0, 2, 3, 1))
return self.module.apply(
{"""params""": params or self.params} , jnp.array(__lowerCAmelCase , dtype=jnp.floataa) , rngs={} , )
| 272 | 1 |
from ...utils import deprecate
from ..controlnet.pipeline_flax_controlnet import FlaxStableDiffusionControlNetPipeline # noqa: F401
deprecate(
"""stable diffusion controlnet""",
"""0.22.0""",
"""Importing `FlaxStableDiffusionControlNetPipeline` from diffusers.pipelines.stable_diffusion.flax_pipeline_stable_diffusion_controlnet is deprecated. Please import `from diffusers import FlaxStableDiffusionControlNetPipeline` instead.""",
standard_warn=False,
stacklevel=3,
)
| 357 | '''simple docstring'''
def SCREAMING_SNAKE_CASE__ ( snake_case : int , snake_case : int ) -> bool:
"""simple docstring"""
return numa ^ numa < 0
if __name__ == "__main__":
import doctest
doctest.testmod()
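# The XOR trick above works because the sign bit of x ^ y is set exactly when the
# operands' sign bits differ: the check returns True for (1, -1) and False for (3, 7).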
| 345 | 0 |
"""simple docstring"""
from typing import List
from .keymap import KEYMAP, get_character
def _lowerCAmelCase ( lowercase_ ):
def decorator(lowercase_ ):
UpperCAmelCase = getattr(snake_case_ , 'handle_key' , [] )
handle += [key]
setattr(snake_case_ , 'handle_key' , snake_case_ )
return func
return decorator
def _lowerCAmelCase ( *lowercase_ ):
def decorator(lowercase_ ):
UpperCAmelCase = getattr(snake_case_ , 'handle_key' , [] )
handle += keys
setattr(snake_case_ , 'handle_key' , snake_case_ )
return func
return decorator
class A_ ( _UpperCAmelCase ):
"""simple docstring"""
def __new__( cls :Union[str, Any] , lowercase_ :Union[str, Any] , lowercase_ :Union[str, Any] , lowercase_ :str ) -> Optional[int]:
UpperCAmelCase = super().__new__(cls , a__ , a__ , a__ )
if not hasattr(a__ , 'key_handler' ):
setattr(a__ , 'key_handler' , {} )
setattr(a__ , 'handle_input' , KeyHandler.handle_input )
for value in attrs.values():
UpperCAmelCase = getattr(a__ , 'handle_key' , [] )
for key in handled_keys:
UpperCAmelCase = value
return new_cls
@staticmethod
def UpperCAmelCase__ ( cls :Optional[int] ) -> Dict:
UpperCAmelCase = get_character()
if char != KEYMAP["undefined"]:
UpperCAmelCase = ord(a__ )
UpperCAmelCase = cls.key_handler.get(a__ )
if handler:
UpperCAmelCase = char
return handler(cls )
else:
return None
def _lowerCAmelCase ( cls ):
return KeyHandler(cls.__name__ , cls.__bases__ , cls.__dict__.copy() )
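# Hedged usage sketch of the decorator + metaclass machinery above (the decorator
# and metaclass names are mangled here; upstream they are `mark`/`mark_multiple`
# and `KeyHandler`). A class built with them blocks on a keypress and dispatches:
#
#   class Menu(metaclass=KeyHandler):   # assumed original metaclass name
#       @mark(KEYMAP["enter"])          # assumed original decorator name
#       def select(cls):
#           return "selected"
#
#   result = Menu.handle_input()  # waits for a key; returns the handler result or None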
| 78 |
from math import pi
def lowerCamelCase__ ( snake_case_ : int , snake_case_ : int ) -> float:
return 2 * pi * radius * (angle / 360)
if __name__ == "__main__":
print(arc_length(90, 10))
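# Sanity check: a 90-degree arc of a radius-10 circle is a quarter of the
# circumference, 2 * pi * 10 / 4 ~ 15.708, matching the call above.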
| 24 | 0 |
import inspect
import unittest
from transformers import BitConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import BitBackbone, BitForImageClassification, BitImageProcessor, BitModel
from transformers.models.bit.modeling_bit import BIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
class BitModelTester:
    """simple docstring"""

    def __init__(self, parent, batch_size=3, image_size=32, num_channels=3, embeddings_size=10, hidden_sizes=[8, 16, 32, 64], depths=[1, 1, 2, 1], is_training=True, use_labels=True, hidden_act="relu", num_labels=3, scope=None, out_features=["stage2", "stage3", "stage4"], out_indices=[2, 3, 4], num_groups=1, ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.embeddings_size = embeddings_size
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_act = hidden_act
        self.num_labels = num_labels
        self.scope = scope
        self.num_stages = len(depths)
        self.out_features = out_features
        self.out_indices = out_indices
        self.num_groups = num_groups
    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)

        config = self.get_config()

        return config, pixel_values, labels

    def get_config(self):
        return BitConfig(
            num_channels=self.num_channels, embeddings_size=self.embeddings_size, hidden_sizes=self.hidden_sizes, depths=self.depths, hidden_act=self.hidden_act, num_labels=self.num_labels, out_features=self.out_features, out_indices=self.out_indices, num_groups=self.num_groups, )

    def create_and_check_model(self, config, pixel_values, labels):
        model = BitModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.last_hidden_state.shape, (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32), )
    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.num_labels
        model = BitForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_backbone(self, config, pixel_values, labels):
        model = BitBackbone(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)

        # verify feature maps
        self.parent.assertEqual(len(result.feature_maps), len(config.out_features))
        self.parent.assertListEqual(list(result.feature_maps[0].shape), [self.batch_size, self.hidden_sizes[1], 4, 4])

        # verify channels
        self.parent.assertEqual(len(model.channels), len(config.out_features))
        self.parent.assertListEqual(model.channels, config.hidden_sizes[1:])

        # verify backbone works with out_features=None
        config.out_features = None
        model = BitBackbone(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)

        # verify feature maps
        self.parent.assertEqual(len(result.feature_maps), 1)
        self.parent.assertListEqual(list(result.feature_maps[0].shape), [self.batch_size, self.hidden_sizes[-1], 1, 1])

        # verify channels
        self.parent.assertEqual(len(model.channels), 1)
        self.parent.assertListEqual(model.channels, [config.hidden_sizes[-1]])

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class BitModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """simple docstring"""

    all_model_classes = (BitModel, BitForImageClassification, BitBackbone) if is_torch_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": BitModel, "image-classification": BitForImageClassification}
        if is_torch_available()
        else {}
    )

    fx_compatible = False
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False
    def setUp(self):
        self.model_tester = BitModelTester(self)
        self.config_tester = ConfigTester(self, config_class=BitConfig, has_text_modality=False)

    def test_config(self):
        self.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()

    def create_and_test_config_common_properties(self):
        return

    @unittest.skip(reason="Bit does not output attentions")
    def test_attention_outputs(self):
        pass

    @unittest.skip(reason="Bit does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="Bit does not support input and output embeddings")
    def test_model_common_attributes(self):
        pass
    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_backbone(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_backbone(*config_and_inputs)

    def test_initialization(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config=config)
            for name, module in model.named_modules():
                if isinstance(module, (nn.BatchNorm2d, nn.GroupNorm)):
                    self.assertTrue(
                        torch.all(module.weight == 1), msg=f'''Parameter {name} of model {model_class} seems not properly initialized''', )
                    self.assertTrue(
                        torch.all(module.bias == 0), msg=f'''Parameter {name} of model {model_class} seems not properly initialized''', )
    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()

            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states

            expected_num_stages = self.model_tester.num_stages
            self.assertEqual(len(hidden_states), expected_num_stages + 1)

            # Bit's feature maps are of shape (batch_size, num_channels, height, width)
            self.assertListEqual(
                list(hidden_states[0].shape[-2:]), [self.model_tester.image_size // 4, self.model_tester.image_size // 4], )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        layers_type = ["preactivation", "bottleneck"]
        for model_class in self.all_model_classes:
            for layer_type in layers_type:
                config.layer_type = layer_type
                inputs_dict["output_hidden_states"] = True
                check_hidden_states_output(inputs_dict, config, model_class)

                # check that output_hidden_states also work using config
                del inputs_dict["output_hidden_states"]
                config.output_hidden_states = True

                check_hidden_states_output(inputs_dict, config, model_class)

    @unittest.skip(reason="Bit does not use feedforward chunking")
    def test_feed_forward_chunking(self):
        pass

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in BIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = BitModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image


@require_torch
@require_vision
class BitModelIntegrationTest(unittest.TestCase):
    """simple docstring"""

    @cached_property
    def default_image_processor(self):
        return (
            BitImageProcessor.from_pretrained(BIT_PRETRAINED_MODEL_ARCHIVE_LIST[0]) if is_vision_available() else None
        )

    @slow
    def test_inference_image_classification_head(self):
        model = BitForImageClassification.from_pretrained(BIT_PRETRAINED_MODEL_ARCHIVE_LIST[0]).to(torch_device)

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor([[-0.6526, -0.5263, -1.4398]]).to(torch_device)

        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))


@require_torch
class BitBackboneTest(BackboneTesterMixin, unittest.TestCase):
    """simple docstring"""

    all_model_classes = (BitBackbone,) if is_torch_available() else ()
    config_class = BitConfig

    has_attentions = False

    def setUp(self):
        self.model_tester = BitModelTester(self)
| 371 |
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

XMOD_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"""facebook/xmod-base""": """https://huggingface.co/facebook/xmod-base/resolve/main/config.json""",
"""facebook/xmod-large-prenorm""": """https://huggingface.co/facebook/xmod-large-prenorm/resolve/main/config.json""",
"""facebook/xmod-base-13-125k""": """https://huggingface.co/facebook/xmod-base-13-125k/resolve/main/config.json""",
"""facebook/xmod-base-30-125k""": """https://huggingface.co/facebook/xmod-base-30-125k/resolve/main/config.json""",
"""facebook/xmod-base-30-195k""": """https://huggingface.co/facebook/xmod-base-30-195k/resolve/main/config.json""",
"""facebook/xmod-base-60-125k""": """https://huggingface.co/facebook/xmod-base-60-125k/resolve/main/config.json""",
"""facebook/xmod-base-60-265k""": """https://huggingface.co/facebook/xmod-base-60-265k/resolve/main/config.json""",
"""facebook/xmod-base-75-125k""": """https://huggingface.co/facebook/xmod-base-75-125k/resolve/main/config.json""",
"""facebook/xmod-base-75-269k""": """https://huggingface.co/facebook/xmod-base-75-269k/resolve/main/config.json""",
}
class XmodConfig(PretrainedConfig):
    """simple docstring"""

    model_type = "xmod"
    def __init__(self, vocab_size=30522, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=2, initializer_range=0.02, layer_norm_eps=1e-12, pad_token_id=1, bos_token_id=0, eos_token_id=2, position_embedding_type="absolute", use_cache=True, classifier_dropout=None, pre_norm=False, adapter_reduction_factor=2, adapter_layer_norm=False, adapter_reuse_layer_norm=True, ln_before_adapter=True, languages=("en_XX",), default_language=None, **kwargs, ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout
        self.pre_norm = pre_norm
        self.adapter_reduction_factor = adapter_reduction_factor
        self.adapter_layer_norm = adapter_layer_norm
        self.adapter_reuse_layer_norm = adapter_reuse_layer_norm
        self.ln_before_adapter = ln_before_adapter
        self.languages = list(languages)
        self.default_language = default_language
class XmodOnnxConfig(OnnxConfig):
    """simple docstring"""

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
return OrderedDict(
[
("input_ids", dynamic_axis),
("attention_mask", dynamic_axis),
] )
| 286 | 0 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

PEGASUS_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'''google/pegasus-large''': '''https://huggingface.co/google/pegasus-large/resolve/main/config.json''',
# See all PEGASUS models at https://huggingface.co/models?filter=pegasus
}
class PegasusConfig(PretrainedConfig):
    """simple docstring"""

    model_type = "pegasus"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}

    def __init__(self, vocab_size=50265, max_position_embeddings=1024, encoder_layers=12, encoder_ffn_dim=4096, encoder_attention_heads=16, decoder_layers=12, decoder_ffn_dim=4096, decoder_attention_heads=16, encoder_layerdrop=0.0, decoder_layerdrop=0.0, use_cache=True, is_encoder_decoder=True, activation_function="gelu", d_model=1024, dropout=0.1, attention_dropout=0.0, activation_dropout=0.0, init_std=0.02, decoder_start_token_id=0, scale_embedding=False, pad_token_id=0, eos_token_id=1, forced_eos_token_id=1, **kwargs, ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.num_hidden_layers = encoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        super().__init__(
            pad_token_id=pad_token_id, eos_token_id=eos_token_id, is_encoder_decoder=is_encoder_decoder, decoder_start_token_id=decoder_start_token_id, forced_eos_token_id=forced_eos_token_id, **kwargs, )

    @property
    def num_attention_heads(self) -> int:
        return self.encoder_attention_heads

    @property
    def hidden_size(self) -> int:
        return self.d_model
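# Usage sketch (illustrative): the attribute aliases above let generic code read
# `num_attention_heads`/`hidden_size` from any config, e.g.
#
#     config = PegasusConfig(encoder_layers=2, decoder_layers=2, d_model=512)
#     config.hidden_size          # -> 512 (delegates to d_model)
#     config.num_attention_heads  # -> 16  (delegates to encoder_attention_heads)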
| 37 |
'''simple docstring'''
from collections import UserDict
from typing import List, Union
from ..utils import (
add_end_docstrings,
is_tf_available,
is_torch_available,
is_vision_available,
logging,
requires_backends,
)
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
if is_tf_available():
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
from ..tf_utils import stable_softmax
logger = logging.get_logger(__name__)
@add_end_docstrings(PIPELINE_INIT_ARGS)
class ZeroShotImageClassificationPipeline(Pipeline):
    """simple docstring"""

    def __init__(self, **kwargs):
        super().__init__(**kwargs)

        requires_backends(self, "vision")
        self.check_model_type(
            TF_MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
            if self.framework == "tf"
            else MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING )

    def __call__(self, images, **kwargs):
        return super().__call__(images, **kwargs)

    def _sanitize_parameters(self, **kwargs):
        preprocess_params = {}
        if "candidate_labels" in kwargs:
            preprocess_params["candidate_labels"] = kwargs["candidate_labels"]
        if "hypothesis_template" in kwargs:
            preprocess_params["hypothesis_template"] = kwargs["hypothesis_template"]

        return preprocess_params, {}, {}

    def preprocess(self, image, candidate_labels=None, hypothesis_template="This is a photo of {}."):
        image = load_image(image)
        inputs = self.image_processor(images=[image], return_tensors=self.framework)
        inputs["candidate_labels"] = candidate_labels
        sequences = [hypothesis_template.format(x) for x in candidate_labels]
        text_inputs = self.tokenizer(sequences, return_tensors=self.framework, padding=True)
        inputs["text_inputs"] = [text_inputs]
        return inputs

    def _forward(self, model_inputs):
        candidate_labels = model_inputs.pop("candidate_labels")
        text_inputs = model_inputs.pop("text_inputs")
        if isinstance(text_inputs[0], UserDict):
            text_inputs = text_inputs[0]
        else:
            # Batching case.
            text_inputs = text_inputs[0][0]

        outputs = self.model(**text_inputs, **model_inputs)

        model_outputs = {
            "candidate_labels": candidate_labels,
            "logits": outputs.logits_per_image,
        }
        return model_outputs

    def postprocess(self, model_outputs):
        candidate_labels = model_outputs.pop("candidate_labels")
        logits = model_outputs["logits"][0]
        if self.framework == "pt":
            probs = logits.softmax(dim=-1).squeeze(-1)
            scores = probs.tolist()
            if not isinstance(scores, list):
                scores = [scores]
        elif self.framework == "tf":
            probs = stable_softmax(logits, axis=-1)
            scores = probs.numpy().tolist()
        else:
            raise ValueError(f"""Unsupported framework: {self.framework}""")

        result = [
            {"score": score, "label": candidate_label}
            for score, candidate_label in sorted(zip(scores, candidate_labels), key=lambda x: -x[0])
        ]
        return result
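# Usage sketch via the public pipeline API (file name and scores are
# illustrative, not real outputs):
#
#     from transformers import pipeline
#     classifier = pipeline("zero-shot-image-classification")
#     classifier("cat.png", candidate_labels=["cat", "dog"])
#     # -> [{"score": 0.98, "label": "cat"}, {"score": 0.02, "label": "dog"}]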
| 37 | 1 |
'''simple docstring'''
import os
def largest_product(grid):
    n_columns = len(grid[0])
    n_rows = len(grid)
    largest = 0
    lr_diag_product = 0
    rl_diag_product = 0

    # Check vertically, horizontally, diagonally at the same time (only works
    # for nxn grid)
    for i in range(n_columns):
        for j in range(n_rows - 3):
            vert_product = grid[j][i] * grid[j + 1][i] * grid[j + 2][i] * grid[j + 3][i]
            horz_product = grid[i][j] * grid[i][j + 1] * grid[i][j + 2] * grid[i][j + 3]

            # Left-to-right diagonal (\) product
            if i < n_columns - 3:
                lr_diag_product = (
                    grid[i][j]
                    * grid[i + 1][j + 1]
                    * grid[i + 2][j + 2]
                    * grid[i + 3][j + 3]
                )

            # Right-to-left diagonal(/) product
            if i > 2:
                rl_diag_product = (
                    grid[i][j]
                    * grid[i - 1][j + 1]
                    * grid[i - 2][j + 2]
                    * grid[i - 3][j + 3]
                )

            max_product = max(
                vert_product, horz_product, lr_diag_product, rl_diag_product )
            if max_product > largest:
                largest = max_product

    return largest
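# Quick check (illustrative): on a uniform grid every four-cell line has the
# same product, so largest_product([[2] * 4 for _ in range(4)]) returns 2**4 == 16.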
def solution():
    grid = []
    with open(os.path.dirname(__file__) + "/grid.txt") as file:
        for line in file:
            grid.append(line.strip("\n").split(" "))

    grid = [[int(i) for i in grid[j]] for j in range(len(grid))]

    return largest_product(grid)
if __name__ == "__main__":
print(solution()) | 358 | '''simple docstring'''
import logging
import os
import sys
from dataclasses import dataclass, field
from itertools import chain
from typing import Optional, Union
import datasets
import numpy as np
import torch
from datasets import load_dataset
import transformers
from transformers import (
AutoConfig,
AutoModelForMultipleChoice,
AutoTokenizer,
HfArgumentParser,
Trainer,
TrainingArguments,
default_data_collator,
set_seed,
)
from transformers.tokenization_utils_base import PreTrainedTokenizerBase
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import PaddingStrategy, check_min_version, send_example_telemetry
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version('4.31.0')
logger = logging.getLogger(__name__)
@dataclass
class ModelArguments:
    """simple docstring"""

    model_name_or_path: str = field(
        metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"} )
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"} )
    tokenizer_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"} )
    cache_dir: Optional[str] = field(
        default=None, metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"}, )
    use_fast_tokenizer: bool = field(
        default=True, metadata={"help": "Whether to use one of the fast tokenizer (backed by the tokenizers library) or not."}, )
    model_revision: str = field(
        default="main", metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."}, )
    use_auth_token: bool = field(
        default=False, metadata={
            "help": (
                "Will use the token generated when running `huggingface-cli login` (necessary to use this script "
                "with private models)."
            )
        }, )
@dataclass
class DataTrainingArguments:
    """simple docstring"""

    train_file: Optional[str] = field(default=None, metadata={"help": "The input training data file (a text file)."})
    validation_file: Optional[str] = field(
        default=None, metadata={"help": "An optional input evaluation data file to evaluate the perplexity on (a text file)."}, )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached training and evaluation sets"} )
    preprocessing_num_workers: Optional[int] = field(
        default=None, metadata={"help": "The number of processes to use for the preprocessing."}, )
    max_seq_length: Optional[int] = field(
        default=None, metadata={
            "help": (
                "The maximum total input sequence length after tokenization. If passed, sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        }, )
    pad_to_max_length: bool = field(
        default=False, metadata={
            "help": (
                "Whether to pad all samples to the maximum sentence length. "
                "If False, will pad the samples dynamically when batching to the maximum length in the batch. More "
                "efficient on GPU but very bad for TPU."
            )
        }, )
    max_train_samples: Optional[int] = field(
        default=None, metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of training examples to this "
                "value if set."
            )
        }, )
    max_eval_samples: Optional[int] = field(
        default=None, metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of evaluation examples to this "
                "value if set."
            )
        }, )

    def __post_init__(self):
        if self.train_file is not None:
            extension = self.train_file.split(".")[-1]
            assert extension in ["csv", "json"], "`train_file` should be a csv or a json file."
        if self.validation_file is not None:
            extension = self.validation_file.split(".")[-1]
            assert extension in ["csv", "json"], "`validation_file` should be a csv or a json file."
@dataclass
class DataCollatorForMultipleChoice:
    """simple docstring"""

    tokenizer: PreTrainedTokenizerBase
    padding: Union[bool, str, PaddingStrategy] = True
    max_length: Optional[int] = None
    pad_to_multiple_of: Optional[int] = None

    def __call__(self, features):
        label_name = "label" if "label" in features[0].keys() else "labels"
        labels = [feature.pop(label_name) for feature in features]
        batch_size = len(features)
        num_choices = len(features[0]["input_ids"])
        flattened_features = [
            [{k: v[i] for k, v in feature.items()} for i in range(num_choices)] for feature in features
        ]
        flattened_features = list(chain(*flattened_features))

        batch = self.tokenizer.pad(
            flattened_features, padding=self.padding, max_length=self.max_length, pad_to_multiple_of=self.pad_to_multiple_of, return_tensors="pt", )

        # Un-flatten
        batch = {k: v.view(batch_size, num_choices, -1) for k, v in batch.items()}
        # Add back labels
        batch["labels"] = torch.tensor(labels, dtype=torch.int64)
        return batch
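# Shape walk-through (illustrative): for a batch of B examples with 4 answer
# choices each, the collator pads the 4*B flattened sequences to a common
# length L, then `v.view(batch_size, num_choices, -1)` restores (B, 4, L);
# "labels" is added back with shape (B,).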
def main():
    # See all possible arguments in src/transformers/training_args.py
    # or by passing the --help flag to this script.
    # We now keep distinct sets of args, for a cleaner separation of concerns.

    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
    if len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
    else:
        model_args, data_args, training_args = parser.parse_args_into_dataclasses()
# Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
# information sent is the one passed as arguments along with your Python/PyTorch versions.
send_example_telemetry("run_swag", a_, a_ )
# Setup logging
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", datefmt="%m/%d/%Y %H:%M:%S", handlers=[logging.StreamHandler(sys.stdout )], )
if training_args.should_log:
# The default of training_args.log_level is passive, so we set log level at info here to have that default.
transformers.utils.logging.set_verbosity_info()
    log_level = training_args.get_process_log_level()
    logger.setLevel(log_level)
    datasets.utils.logging.set_verbosity(log_level)
    transformers.utils.logging.set_verbosity(log_level)
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Log on each process the small summary:
    logger.warning(
        f"""Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}"""
        + f"""distributed training: {bool(training_args.local_rank != -1)}, 16-bits training: {training_args.fp16}""" )
logger.info(f"""Training/evaluation parameters {training_args}""" )
# Detecting last checkpoint.
    last_checkpoint = None
    if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir:
        last_checkpoint = get_last_checkpoint(training_args.output_dir)
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
f"""Output directory ({training_args.output_dir}) already exists and is not empty. """
"Use --overwrite_output_dir to overcome." )
elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
logger.info(
f"""Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change """
"the `--output_dir` or add `--overwrite_output_dir` to train from scratch." )
# Set seed before initializing model.
set_seed(training_args.seed )
# Get the datasets: you can either provide your own CSV/JSON/TXT training and evaluation files (see below)
# or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/
# (the dataset will be downloaded automatically from the datasets Hub).
# For CSV/JSON files, this script will use the column called 'text' or the first column if no column called
# 'text' is found. You can easily tweak this behavior (see below).
# In distributed training, the load_dataset function guarantee that only one local process can concurrently
# download the dataset.
    if data_args.train_file is not None or data_args.validation_file is not None:
        data_files = {}
        if data_args.train_file is not None:
            data_files["train"] = data_args.train_file
        if data_args.validation_file is not None:
            data_files["validation"] = data_args.validation_file
        extension = data_args.train_file.split(".")[-1]
        raw_datasets = load_dataset(
            extension, data_files=data_files, cache_dir=model_args.cache_dir, use_auth_token=True if model_args.use_auth_token else None, )
    else:
        # Downloading and loading the swag dataset from the hub.
        raw_datasets = load_dataset(
            "swag", "regular", cache_dir=model_args.cache_dir, use_auth_token=True if model_args.use_auth_token else None, )
# See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at
# https://huggingface.co/docs/datasets/loading_datasets.html.
# Load pretrained model and tokenizer
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
    config = AutoConfig.from_pretrained(
        model_args.config_name if model_args.config_name else model_args.model_name_or_path, cache_dir=model_args.cache_dir, revision=model_args.model_revision, use_auth_token=True if model_args.use_auth_token else None, )
    tokenizer = AutoTokenizer.from_pretrained(
        model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path, cache_dir=model_args.cache_dir, use_fast=model_args.use_fast_tokenizer, revision=model_args.model_revision, use_auth_token=True if model_args.use_auth_token else None, )
    model = AutoModelForMultipleChoice.from_pretrained(
        model_args.model_name_or_path, from_tf=bool(".ckpt" in model_args.model_name_or_path), config=config, cache_dir=model_args.cache_dir, revision=model_args.model_revision, use_auth_token=True if model_args.use_auth_token else None, )

    # When using your own dataset or a different dataset from swag, you will probably need to change this.
    ending_names = [f"""ending{i}""" for i in range(4)]
    context_name = "sent1"
    question_header_name = "sent2"

    if data_args.max_seq_length is None:
        max_seq_length = tokenizer.model_max_length
        if max_seq_length > 1_024:
            logger.warning(
                "The chosen tokenizer supports a `model_max_length` that is longer than the default `block_size` value"
                " of 1024. If you would like to use a longer `block_size` up to `tokenizer.model_max_length` you can"
                " override this default with `--block_size xxx`." )
            max_seq_length = 1_024
    else:
        if data_args.max_seq_length > tokenizer.model_max_length:
            logger.warning(
                f"""The max_seq_length passed ({data_args.max_seq_length}) is larger than the maximum length for the"""
                f"""model ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}.""" )
        max_seq_length = min(data_args.max_seq_length, tokenizer.model_max_length)
    # Preprocessing the datasets.
    def preprocess_function(examples):
        first_sentences = [[context] * 4 for context in examples[context_name]]
        question_headers = examples[question_header_name]
        second_sentences = [
            [f"""{header} {examples[end][i]}""" for end in ending_names] for i, header in enumerate(question_headers)
        ]

        # Flatten out
        first_sentences = list(chain(*first_sentences))
        second_sentences = list(chain(*second_sentences))

        # Tokenize
        tokenized_examples = tokenizer(
            first_sentences, second_sentences, truncation=True, max_length=max_seq_length, padding="max_length" if data_args.pad_to_max_length else False, )

        # Un-flatten
        return {k: [v[i : i + 4] for i in range(0, len(v), 4)] for k, v in tokenized_examples.items()}
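    # Example (illustrative): for one SWAG example with context "A man is
    # cooking." and endings e0..e3, the tokenizer receives four
    # (context, "header e_i") pairs; the final dict re-groups every run of
    # four consecutive rows back into a single example.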
    if training_args.do_train:
        if "train" not in raw_datasets:
            raise ValueError("--do_train requires a train dataset")
        train_dataset = raw_datasets["train"]
        if data_args.max_train_samples is not None:
            max_train_samples = min(len(train_dataset), data_args.max_train_samples)
            train_dataset = train_dataset.select(range(max_train_samples))
        with training_args.main_process_first(desc="train dataset map pre-processing"):
            train_dataset = train_dataset.map(
                preprocess_function, batched=True, num_proc=data_args.preprocessing_num_workers, load_from_cache_file=not data_args.overwrite_cache, )

    if training_args.do_eval:
        if "validation" not in raw_datasets:
            raise ValueError("--do_eval requires a validation dataset")
        eval_dataset = raw_datasets["validation"]
        if data_args.max_eval_samples is not None:
            max_eval_samples = min(len(eval_dataset), data_args.max_eval_samples)
            eval_dataset = eval_dataset.select(range(max_eval_samples))
        with training_args.main_process_first(desc="validation dataset map pre-processing"):
            eval_dataset = eval_dataset.map(
                preprocess_function, batched=True, num_proc=data_args.preprocessing_num_workers, load_from_cache_file=not data_args.overwrite_cache, )
    # Data collator
    data_collator = (
        default_data_collator
        if data_args.pad_to_max_length
        else DataCollatorForMultipleChoice(tokenizer=tokenizer, pad_to_multiple_of=8 if training_args.fp16 else None)
    )
    # Metric
    def compute_metrics(eval_predictions):
        predictions, label_ids = eval_predictions
        preds = np.argmax(predictions, axis=1)
        return {"accuracy": (preds == label_ids).astype(np.float32).mean().item()}
    # Initialize our Trainer
    trainer = Trainer(
        model=model, args=training_args, train_dataset=train_dataset if training_args.do_train else None, eval_dataset=eval_dataset if training_args.do_eval else None, tokenizer=tokenizer, data_collator=data_collator, compute_metrics=compute_metrics, )

    # Training
    if training_args.do_train:
        checkpoint = None
        if training_args.resume_from_checkpoint is not None:
            checkpoint = training_args.resume_from_checkpoint
        elif last_checkpoint is not None:
            checkpoint = last_checkpoint
        train_result = trainer.train(resume_from_checkpoint=checkpoint)
        trainer.save_model()  # Saves the tokenizer too for easy upload
        metrics = train_result.metrics

        max_train_samples = (
            data_args.max_train_samples if data_args.max_train_samples is not None else len(train_dataset)
        )
        metrics["train_samples"] = min(max_train_samples, len(train_dataset))

        trainer.log_metrics("train", metrics)
        trainer.save_metrics("train", metrics)
        trainer.save_state()

    # Evaluation
    if training_args.do_eval:
        logger.info("*** Evaluate ***")

        metrics = trainer.evaluate()
        max_eval_samples = data_args.max_eval_samples if data_args.max_eval_samples is not None else len(eval_dataset)
        metrics["eval_samples"] = min(max_eval_samples, len(eval_dataset))

        trainer.log_metrics("eval", metrics)
        trainer.save_metrics("eval", metrics)

    kwargs = {
        "finetuned_from": model_args.model_name_or_path,
        "tasks": "multiple-choice",
        "dataset_tags": "swag",
        "dataset_args": "regular",
        "dataset": "SWAG",
        "language": "en",
    }
    if training_args.push_to_hub:
        trainer.push_to_hub(**kwargs)
    else:
        trainer.create_model_card(**kwargs)
def _mp_fn(index):
    # For xla_spawn (TPUs)
    main()
if __name__ == "__main__":
main() | 17 | 0 |
import os
def _snake_case ( lowerCAmelCase : str = "input.txt" ):
"""simple docstring"""
with open(os.path.join(os.path.dirname(lowerCAmelCase__ ) , lowerCAmelCase__ ) ) as input_file:
SCREAMING_SNAKE_CASE_ : Any = [
[int(lowerCAmelCase__ ) for element in line.split("," )]
for line in input_file.readlines()
]
SCREAMING_SNAKE_CASE_ : Optional[Any] = len(lowerCAmelCase__ )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = len(matrix[0] )
SCREAMING_SNAKE_CASE_ : Optional[Any] = [[-1 for _ in range(lowerCAmelCase__ )] for _ in range(lowerCAmelCase__ )]
for i in range(lowerCAmelCase__ ):
SCREAMING_SNAKE_CASE_ : Any = matrix[i][0]
for j in range(1 , lowerCAmelCase__ ):
for i in range(lowerCAmelCase__ ):
SCREAMING_SNAKE_CASE_ : List[str] = minimal_path_sums[i][j - 1] + matrix[i][j]
for i in range(1 , lowerCAmelCase__ ):
SCREAMING_SNAKE_CASE_ : Optional[int] = min(
minimal_path_sums[i][j] , minimal_path_sums[i - 1][j] + matrix[i][j] )
for i in range(rows - 2 , -1 , -1 ):
SCREAMING_SNAKE_CASE_ : Union[str, Any] = min(
minimal_path_sums[i][j] , minimal_path_sums[i + 1][j] + matrix[i][j] )
return min(minimal_path_sums_row[-1] for minimal_path_sums_row in minimal_path_sums )
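# Worked example (illustrative): for matrix [[1, 2], [3, 4]] the left-to-right
# pass gives minimal_path_sums = [[1, 3], [3, 7]]; the downward and upward
# relaxations leave it unchanged, and the minimum over the last column is 3
# (the path 1 -> 2).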
if __name__ == "__main__":
print(f'''{solution() = }''')
| 18 | """simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

OPEN_LLAMA_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"""s-JoL/Open-Llama-V1""": """https://huggingface.co/s-JoL/Open-Llama-V1/blob/main/config.json""",
}
class OpenLlamaConfig(PretrainedConfig):
    model_type = "open-llama"

    def __init__(self, vocab_size=100_000, hidden_size=4096, intermediate_size=1_1008, num_hidden_layers=32, num_attention_heads=32, hidden_act="silu", max_position_embeddings=2048, initializer_range=0.02, rms_norm_eps=1e-6, use_cache=True, pad_token_id=0, bos_token_id=1, eos_token_id=2, tie_word_embeddings=False, use_memory_efficient_attention=True, hidden_dropout_prob=0.1, attention_dropout_prob=0.1, use_stable_embedding=True, shared_input_output_embedding=True, rope_scaling=None, **kwargs, ):
        """simple docstring"""
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.initializer_range = initializer_range
        self.rms_norm_eps = rms_norm_eps
        self.use_cache = use_cache
        self.use_memory_efficient_attention = kwargs.pop(
            """use_memorry_efficient_attention""", use_memory_efficient_attention )  # note: the misspelled kwarg key matches the upstream source
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_dropout_prob = attention_dropout_prob
        self.use_stable_embedding = use_stable_embedding
        self.shared_input_output_embedding = shared_input_output_embedding
        self.rope_scaling = rope_scaling
        self._rope_scaling_validation()

        super().__init__(
            pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, tie_word_embeddings=tie_word_embeddings, **kwargs, )

    def _rope_scaling_validation(self):
        """Validate the `rope_scaling` configuration."""
        if self.rope_scaling is None:
            return

        if not isinstance(self.rope_scaling, dict) or len(self.rope_scaling) != 2:
            raise ValueError(
                """`rope_scaling` must be a dictionary with two fields, `type` and `factor`, """
                f'got {self.rope_scaling}' )
        rope_scaling_type = self.rope_scaling.get("""type""", None)
        rope_scaling_factor = self.rope_scaling.get("""factor""", None)
        if rope_scaling_type is None or rope_scaling_type not in ["linear", "dynamic"]:
            raise ValueError(
                f'`rope_scaling`\'s type field must be one of [\'linear\', \'dynamic\'], got {rope_scaling_type}' )
        if rope_scaling_factor is None or not isinstance(rope_scaling_factor, float) or rope_scaling_factor <= 1.0:
            raise ValueError(f'`rope_scaling`\'s factor field must be a float > 1, got {rope_scaling_factor}' )
| 197 | 0 |
import gc
import unittest
import numpy as np
import torch
from torch.backends.cuda import sdp_kernel
from diffusers import (
CMStochasticIterativeScheduler,
ConsistencyModelPipeline,
    UNet2DModel,
)
from diffusers.utils import randn_tensor, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_2, require_torch_gpu
from ..pipeline_params import UNCONDITIONAL_IMAGE_GENERATION_BATCH_PARAMS, UNCONDITIONAL_IMAGE_GENERATION_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class ConsistencyModelPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    '''simple docstring'''

    pipeline_class = ConsistencyModelPipeline
    params = UNCONDITIONAL_IMAGE_GENERATION_PARAMS
    batch_params = UNCONDITIONAL_IMAGE_GENERATION_BATCH_PARAMS
    # Override required_optional_params to remove num_images_per_prompt
    required_optional_params = frozenset(
[
'''num_inference_steps''',
'''generator''',
'''latents''',
'''output_type''',
'''return_dict''',
'''callback''',
'''callback_steps''',
] )
    @property
    def dummy_uncond_unet(self):
        unet = UNet2DModel.from_pretrained(
            'diffusers/consistency-models-test', subfolder='test_unet', )
        return unet

    @property
    def dummy_cond_unet(self):
        unet = UNet2DModel.from_pretrained(
            'diffusers/consistency-models-test', subfolder='test_unet_class_cond', )
        return unet
    def get_dummy_components(self, class_cond=False):
        if class_cond:
            unet = self.dummy_cond_unet
        else:
            unet = self.dummy_uncond_unet

        # Default to CM multistep sampler
        scheduler = CMStochasticIterativeScheduler(
            num_train_timesteps=40, sigma_min=0.002, sigma_max=80.0, )

        components = {
            'unet': unet,
            'scheduler': scheduler,
        }
        return components
    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith('mps'):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)

        inputs = {
            'batch_size': 1,
            'num_inference_steps': None,
            'timesteps': [22, 0],
            'generator': generator,
            'output_type': 'np',
        }
        return inputs
    def test_consistency_model_pipeline_multistep(self):
        device = 'cpu'  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        pipe = ConsistencyModelPipeline(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        image = pipe(**inputs).images
        assert image.shape == (1, 32, 32, 3)

        image_slice = image[0, -3:, -3:, -1]
        expected_slice = np.array([0.3572, 0.6273, 0.4031, 0.3961, 0.4321, 0.5730, 0.5266, 0.4780, 0.5004])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3

    def test_consistency_model_pipeline_multistep_class_cond(self):
        device = 'cpu'  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components(class_cond=True)
        pipe = ConsistencyModelPipeline(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        inputs['class_labels'] = 0
        image = pipe(**inputs).images
        assert image.shape == (1, 32, 32, 3)

        image_slice = image[0, -3:, -3:, -1]
        expected_slice = np.array([0.3572, 0.6273, 0.4031, 0.3961, 0.4321, 0.5730, 0.5266, 0.4780, 0.5004])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3

    def test_consistency_model_pipeline_onestep(self):
        device = 'cpu'  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        pipe = ConsistencyModelPipeline(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        inputs['num_inference_steps'] = 1
        inputs['timesteps'] = None
        image = pipe(**inputs).images
        assert image.shape == (1, 32, 32, 3)

        image_slice = image[0, -3:, -3:, -1]
        expected_slice = np.array([0.5004, 0.5004, 0.4994, 0.5008, 0.4976, 0.5018, 0.4990, 0.4982, 0.4987])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3

    def test_consistency_model_pipeline_onestep_class_cond(self):
        device = 'cpu'  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components(class_cond=True)
        pipe = ConsistencyModelPipeline(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        inputs['num_inference_steps'] = 1
        inputs['timesteps'] = None
        inputs['class_labels'] = 0
        image = pipe(**inputs).images
        assert image.shape == (1, 32, 32, 3)

        image_slice = image[0, -3:, -3:, -1]
        expected_slice = np.array([0.5004, 0.5004, 0.4994, 0.5008, 0.4976, 0.5018, 0.4990, 0.4982, 0.4987])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3
@slow
@require_torch_gpu
class ConsistencyModelPipelineSlowTests(unittest.TestCase):
    '''simple docstring'''

    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def get_inputs(self, seed=0, get_fixed_latents=False, device="cpu", dtype=torch.float16, shape=(1, 3, 64, 64)):
        generator = torch.manual_seed(seed)
        inputs = {
            'num_inference_steps': None,
            'timesteps': [22, 0],
            'class_labels': 0,
            'generator': generator,
            'output_type': 'np',
        }

        if get_fixed_latents:
            latents = self.get_fixed_latents(seed=seed, device=device, dtype=dtype, shape=shape)
            inputs['latents'] = latents

        return inputs

    def get_fixed_latents(self, seed=0, device="cpu", dtype=torch.float16, shape=(1, 3, 64, 64)):
        if type(device) == str:
            device = torch.device(device)
        generator = torch.Generator(device=device).manual_seed(seed)
        latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype)
        return latents
    def test_consistency_model_cd_multistep(self):
        unet = UNet2DModel.from_pretrained('diffusers/consistency_models', subfolder='diffusers_cd_imagenet64_l2')
        scheduler = CMStochasticIterativeScheduler(
            num_train_timesteps=40, sigma_min=0.002, sigma_max=80.0, )
        pipe = ConsistencyModelPipeline(unet=unet, scheduler=scheduler)
        pipe.to(torch_device=torch_device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_inputs()
        image = pipe(**inputs).images
        assert image.shape == (1, 64, 64, 3)

        image_slice = image[0, -3:, -3:, -1]
        expected_slice = np.array([0.0888, 0.0881, 0.0666, 0.0479, 0.0292, 0.0195, 0.0201, 0.0163, 0.0254])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 2e-2

    def test_consistency_model_cd_onestep(self):
        unet = UNet2DModel.from_pretrained('diffusers/consistency_models', subfolder='diffusers_cd_imagenet64_l2')
        scheduler = CMStochasticIterativeScheduler(
            num_train_timesteps=40, sigma_min=0.002, sigma_max=80.0, )
        pipe = ConsistencyModelPipeline(unet=unet, scheduler=scheduler)
        pipe.to(torch_device=torch_device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_inputs()
        inputs['num_inference_steps'] = 1
        inputs['timesteps'] = None
        image = pipe(**inputs).images
        assert image.shape == (1, 64, 64, 3)

        image_slice = image[0, -3:, -3:, -1]
        expected_slice = np.array([0.0340, 0.0152, 0.0063, 0.0267, 0.0221, 0.0107, 0.0416, 0.0186, 0.0217])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 2e-2
    @require_torch_2
    def test_consistency_model_cd_multistep_flash_attn(self):
        unet = UNet2DModel.from_pretrained('diffusers/consistency_models', subfolder='diffusers_cd_imagenet64_l2')
        scheduler = CMStochasticIterativeScheduler(
            num_train_timesteps=40, sigma_min=0.002, sigma_max=80.0, )
        pipe = ConsistencyModelPipeline(unet=unet, scheduler=scheduler)
        pipe.to(torch_device=torch_device, torch_dtype=torch.float16)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_inputs(get_fixed_latents=True, device=torch_device)
        # Ensure usage of flash attention in torch 2.0
        with sdp_kernel(enable_flash=True, enable_math=False, enable_mem_efficient=False):
            image = pipe(**inputs).images
        assert image.shape == (1, 64, 64, 3)

        image_slice = image[0, -3:, -3:, -1]
        expected_slice = np.array([0.1875, 0.1428, 0.1289, 0.2151, 0.2092, 0.1477, 0.1877, 0.1641, 0.1353])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3

    @require_torch_2
    def test_consistency_model_cd_onestep_flash_attn(self):
        unet = UNet2DModel.from_pretrained('diffusers/consistency_models', subfolder='diffusers_cd_imagenet64_l2')
        scheduler = CMStochasticIterativeScheduler(
            num_train_timesteps=40, sigma_min=0.002, sigma_max=80.0, )
        pipe = ConsistencyModelPipeline(unet=unet, scheduler=scheduler)
        pipe.to(torch_device=torch_device, torch_dtype=torch.float16)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_inputs(get_fixed_latents=True, device=torch_device)
        inputs['num_inference_steps'] = 1
        inputs['timesteps'] = None
        # Ensure usage of flash attention in torch 2.0
        with sdp_kernel(enable_flash=True, enable_math=False, enable_mem_efficient=False):
            image = pipe(**inputs).images
        assert image.shape == (1, 64, 64, 3)

        image_slice = image[0, -3:, -3:, -1]
        expected_slice = np.array([0.1663, 0.1948, 0.2275, 0.1680, 0.1204, 0.1245, 0.1858, 0.1338, 0.2095])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3
| 145 |
from typing import Any, Dict, List, Union
from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging, requires_backends
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from ..image_utils import load_image
if is_torch_available():
import torch
from ..models.auto.modeling_auto import MODEL_FOR_OBJECT_DETECTION_MAPPING, MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING
logger = logging.get_logger(__name__)


Prediction = Dict[str, Any]
Predictions = List[Prediction]
@add_end_docstrings(PIPELINE_INIT_ARGS)
class ObjectDetectionPipeline(Pipeline):
    '''simple docstring'''

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)

        if self.framework == "tf":
            raise ValueError(f"The {self.__class__} is only available in PyTorch.")

        requires_backends(self, 'vision')
        self.check_model_type(
            dict(MODEL_FOR_OBJECT_DETECTION_MAPPING.items() + MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING.items()))

    def _sanitize_parameters(self, **kwargs):
        postprocess_kwargs = {}
        if "threshold" in kwargs:
            postprocess_kwargs['threshold'] = kwargs['threshold']
        return {}, {}, postprocess_kwargs

    def __call__(self, *args, **kwargs) -> Union[Predictions, List[Prediction]]:
        return super().__call__(*args, **kwargs)

    def preprocess(self, image):
        image = load_image(image)
        target_size = torch.IntTensor([[image.height, image.width]])
        inputs = self.image_processor(images=[image], return_tensors='pt')
        if self.tokenizer is not None:
            inputs = self.tokenizer(text=inputs['words'], boxes=inputs['boxes'], return_tensors='pt')
        inputs['target_size'] = target_size
        return inputs

    def _forward(self, model_inputs):
        target_size = model_inputs.pop('target_size')
        outputs = self.model(**model_inputs)
        model_outputs = outputs.__class__({'target_size': target_size, **outputs})
        if self.tokenizer is not None:
            model_outputs['bbox'] = model_inputs['bbox']
        return model_outputs

    def postprocess(self, model_outputs, threshold=0.9):
        target_size = model_outputs['target_size']
        if self.tokenizer is not None:
            # This is a LayoutLMForTokenClassification variant.
            # The OCR got the boxes and the model classified the words.
            height, width = target_size[0].tolist()

            def unnormalize(bbox):
                return self._get_bounding_box(
                    torch.Tensor(
                        [
                            (width * bbox[0] / 1_0_0_0),
                            (height * bbox[1] / 1_0_0_0),
                            (width * bbox[2] / 1_0_0_0),
                            (height * bbox[3] / 1_0_0_0),
                        ] ) )

            scores, classes = model_outputs['logits'].squeeze(0).softmax(dim=-1).max(dim=-1)
            labels = [self.model.config.id2label[prediction] for prediction in classes.tolist()]
            boxes = [unnormalize(bbox) for bbox in model_outputs['bbox'].squeeze(0)]
            keys = ['score', 'label', 'box']
            annotation = [dict(zip(keys, vals)) for vals in zip(scores.tolist(), labels, boxes) if vals[0] > threshold]
        else:
            # This is a regular ForObjectDetectionModel
            raw_annotations = self.image_processor.post_process_object_detection(model_outputs, threshold, target_size)
            raw_annotation = raw_annotations[0]
            scores = raw_annotation['scores']
            labels = raw_annotation['labels']
            boxes = raw_annotation['boxes']

            raw_annotation['scores'] = scores.tolist()
            raw_annotation['labels'] = [self.model.config.id2label[label.item()] for label in labels]
            raw_annotation['boxes'] = [self._get_bounding_box(box) for box in boxes]

            # {"scores": [...], ...} --> [{"score":x, ...}, ...]
            keys = ['score', 'label', 'box']
            annotation = [
                dict(zip(keys, vals))
                for vals in zip(raw_annotation['scores'], raw_annotation['labels'], raw_annotation['boxes'])
            ]

        return annotation

    def _get_bounding_box(self, box) -> Dict[str, int]:
        if self.framework != "pt":
            raise ValueError('The ObjectDetectionPipeline is only available in PyTorch.')
        xmin, ymin, xmax, ymax = box.int().tolist()
        bbox = {
            'xmin': xmin,
            'ymin': ymin,
            'xmax': xmax,
            'ymax': ymax,
        }
        return bbox
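# Usage sketch via the public pipeline API (file name, score and box values are
# illustrative):
#
#     from transformers import pipeline
#     detector = pipeline("object-detection")
#     detector("street.jpg", threshold=0.9)
#     # -> [{"score": 0.99, "label": "car",
#     #      "box": {"xmin": 41, "ymin": 90, "xmax": 198, "ymax": 176}}]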
| 145 | 1 |
"""simple docstring"""
class MaxFenwickTree:
    def __init__(self, size) -> None:
        self.size = size
        self.arr = [0] * size
        self.tree = [0] * size

    @staticmethod
    def get_next(index) -> int:
        return index | (index + 1)

    @staticmethod
    def get_prev(index) -> int:
        return (index & (index + 1)) - 1

    def update(self, index, value) -> None:
        self.arr[index] = value
        while index < self.size:
            current_left_border = self.get_prev(index) + 1
            if current_left_border == index:
                self.tree[index] = value
            else:
                # NOTE: the original expression was garbled to `max(a, a, a)`;
                # taking the running maximum with the stored node value is the
                # assumed intent.
                self.tree[index] = max(value, self.tree[index])
            index = self.get_next(index)

    def query(self, left, right) -> int:
        right -= 1  # because `right` is exclusive
        result = 0
        while left <= right:
            current_left = self.get_prev(right)
            if left <= current_left:
                result = max(result, self.tree[right])
                right = current_left
            else:
                result = max(result, self.arr[right])
                right -= 1
        return result
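# Usage sketch (names as restored above; assumes the `update` reconstruction):
#
#     tree = MaxFenwickTree(4)
#     tree.update(0, 5)
#     tree.update(2, 7)
#     tree.query(0, 3)  # -> 7, the maximum over indices [0, 3)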
if __name__ == "__main__":
import doctest
doctest.testmod()
| 77 | """simple docstring"""
import argparse
import collections
import json
import os
import re
import string
import sys
import numpy as np
ARTICLES_REGEX = re.compile(r"\b(a|an|the)\b", re.UNICODE)
OPTS = None
def parse_args():
    parser = argparse.ArgumentParser('Official evaluation script for SQuAD version 2.0.')
    parser.add_argument('data_file', metavar='data.json', help='Input data JSON file.')
    parser.add_argument('pred_file', metavar='pred.json', help='Model predictions.')
    parser.add_argument(
        '--out-file', '-o', metavar='eval.json', help='Write accuracy metrics to file (default is stdout).')
    parser.add_argument(
        '--na-prob-file', '-n', metavar='na_prob.json', help='Model estimates of probability of no answer.')
    parser.add_argument(
        '--na-prob-thresh', '-t', type=float, default=1.0, help='Predict "" if no-answer probability exceeds this (default = 1.0).', )
    parser.add_argument(
        '--out-image-dir', '-p', metavar='out_images', default=None, help='Save precision-recall curves to directory.')
    parser.add_argument('--verbose', '-v', action='store_true')
    if len(sys.argv) == 1:
        parser.print_help()
        sys.exit(1)
    return parser.parse_args()
def a_ ( _lowerCAmelCase : Optional[Any] ):
'''simple docstring'''
lowercase__ : Tuple = {}
for article in dataset:
for p in article["paragraphs"]:
for qa in p["qas"]:
lowercase__ : Optional[int] = bool(qa['answers']['text'] )
return qid_to_has_ans
def a_ ( _lowerCAmelCase : Any ):
'''simple docstring'''
def remove_articles(_lowerCAmelCase : int ):
return ARTICLES_REGEX.sub(' ' , _lowerCAmelCase )
def white_space_fix(_lowerCAmelCase : str ):
return " ".join(text.split() )
def remove_punc(_lowerCAmelCase : List[Any] ):
lowercase__ : int = set(string.punctuation )
return "".join(ch for ch in text if ch not in exclude )
def lower(_lowerCAmelCase : List[str] ):
return text.lower()
return white_space_fix(remove_articles(remove_punc(lower(_lowerCAmelCase ) ) ) )
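

# Worked example (added for illustration, not part of the official script):
# lowercasing, punctuation and article removal, and whitespace collapsing give
assert normalize_answer("The Quick, Brown Fox!") == "quick brown fox"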
def get_tokens(s):
    if not s:
        return []
    return normalize_answer(s).split()


def compute_exact(a_gold, a_pred):
    return int(normalize_answer(a_gold) == normalize_answer(a_pred))


def compute_f1(a_gold, a_pred):
    gold_toks = get_tokens(a_gold)
    pred_toks = get_tokens(a_pred)
    common = collections.Counter(gold_toks) & collections.Counter(pred_toks)
    num_same = sum(common.values())
    if len(gold_toks) == 0 or len(pred_toks) == 0:
        # If either is no-answer, then F1 is 1 if they agree, 0 otherwise
        return int(gold_toks == pred_toks)
    if num_same == 0:
        return 0
    precision = 1.0 * num_same / len(pred_toks)
    recall = 1.0 * num_same / len(gold_toks)
    f1 = (2 * precision * recall) / (precision + recall)
    return f1
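

# Worked example (illustrative): after article removal the gold tokens are
# ["in", "city"] and the prediction's are ["city"], so precision = 1.0,
# recall = 0.5, and F1 = 2/3; exact match is robust to normalization.
assert compute_exact("The Answer", "answer") == 1
assert abs(compute_f1("in the city", "the city") - 2 / 3) < 1e-9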
def get_raw_scores(dataset, preds):
    exact_scores = {}
    f1_scores = {}
    for article in dataset:
        for p in article["paragraphs"]:
            for qa in p["qas"]:
                qid = qa["id"]
                gold_answers = [t for t in qa["answers"]["text"] if normalize_answer(t)]
                if not gold_answers:
                    # For unanswerable questions, only correct answer is empty string
                    gold_answers = [""]
                if qid not in preds:
                    print(f"Missing prediction for {qid}")
                    continue
                a_pred = preds[qid]
                # Take max over all gold answers
                exact_scores[qid] = max(compute_exact(a, a_pred) for a in gold_answers)
                f1_scores[qid] = max(compute_f1(a, a_pred) for a in gold_answers)
    return exact_scores, f1_scores


def apply_no_ans_threshold(scores, na_probs, qid_to_has_ans, na_prob_thresh):
    new_scores = {}
    for qid, s in scores.items():
        pred_na = na_probs[qid] > na_prob_thresh
        if pred_na:
            new_scores[qid] = float(not qid_to_has_ans[qid])
        else:
            new_scores[qid] = s
    return new_scores


def make_eval_dict(exact_scores, f1_scores, qid_list=None):
    if not qid_list:
        total = len(exact_scores)
        return collections.OrderedDict(
            [
                ("exact", 100.0 * sum(exact_scores.values()) / total),
                ("f1", 100.0 * sum(f1_scores.values()) / total),
                ("total", total),
            ]
        )
    else:
        total = len(qid_list)
        return collections.OrderedDict(
            [
                ("exact", 100.0 * sum(exact_scores[k] for k in qid_list) / total),
                ("f1", 100.0 * sum(f1_scores[k] for k in qid_list) / total),
                ("total", total),
            ]
        )


def merge_eval(main_eval, new_eval, prefix):
    for k in new_eval:
        main_eval[f"{prefix}_{k}"] = new_eval[k]


def plot_pr_curve(precisions, recalls, out_image, title):
    plt.step(recalls, precisions, color="b", alpha=0.2, where="post")
    plt.fill_between(recalls, precisions, step="post", alpha=0.2, color="b")
    plt.xlabel("Recall")
    plt.ylabel("Precision")
    plt.xlim([0.0, 1.05])
    plt.ylim([0.0, 1.05])
    plt.title(title)
    plt.savefig(out_image)
    plt.clf()


def make_precision_recall_eval(scores, na_probs, num_true_pos, qid_to_has_ans, out_image=None, title=None):
    qid_list = sorted(na_probs, key=lambda k: na_probs[k])
    true_pos = 0.0
    cur_p = 1.0
    cur_r = 0.0
    precisions = [1.0]
    recalls = [0.0]
    avg_prec = 0.0
    for i, qid in enumerate(qid_list):
        if qid_to_has_ans[qid]:
            true_pos += scores[qid]
        cur_p = true_pos / float(i + 1)
        cur_r = true_pos / float(num_true_pos)
        if i == len(qid_list) - 1 or na_probs[qid] != na_probs[qid_list[i + 1]]:
            # i.e., if we can put a threshold after this point
            avg_prec += cur_p * (cur_r - recalls[-1])
            precisions.append(cur_p)
            recalls.append(cur_r)
    if out_image:
        plot_pr_curve(precisions, recalls, out_image, title)
    return {"ap": 100.0 * avg_prec}


def run_precision_recall_analysis(main_eval, exact_raw, f1_raw, na_probs, qid_to_has_ans, out_image_dir):
    if out_image_dir and not os.path.exists(out_image_dir):
        os.makedirs(out_image_dir)
    num_true_pos = sum(1 for v in qid_to_has_ans.values() if v)
    if num_true_pos == 0:
        return
    pr_exact = make_precision_recall_eval(
        exact_raw,
        na_probs,
        num_true_pos,
        qid_to_has_ans,
        out_image=os.path.join(out_image_dir, "pr_exact.png"),
        title="Precision-Recall curve for Exact Match score",
    )
    pr_f1 = make_precision_recall_eval(
        f1_raw,
        na_probs,
        num_true_pos,
        qid_to_has_ans,
        out_image=os.path.join(out_image_dir, "pr_f1.png"),
        title="Precision-Recall curve for F1 score",
    )
    oracle_scores = {k: float(v) for k, v in qid_to_has_ans.items()}
    pr_oracle = make_precision_recall_eval(
        oracle_scores,
        na_probs,
        num_true_pos,
        qid_to_has_ans,
        out_image=os.path.join(out_image_dir, "pr_oracle.png"),
        title="Oracle Precision-Recall curve (binary task of HasAns vs. NoAns)",
    )
    merge_eval(main_eval, pr_exact, "pr_exact")
    merge_eval(main_eval, pr_f1, "pr_f1")
    merge_eval(main_eval, pr_oracle, "pr_oracle")


def histogram_na_prob(na_probs, qid_list, image_dir, name):
    if not qid_list:
        return
    x = [na_probs[k] for k in qid_list]
    weights = np.ones_like(x) / float(len(x))
    plt.hist(x, weights=weights, bins=20, range=(0.0, 1.0))
    plt.xlabel("Model probability of no-answer")
    plt.ylabel("Proportion of dataset")
    plt.title(f"Histogram of no-answer probability: {name}")
    plt.savefig(os.path.join(image_dir, f"na_prob_hist_{name}.png"))
    plt.clf()


def find_best_thresh(preds, scores, na_probs, qid_to_has_ans):
    num_no_ans = sum(1 for k in qid_to_has_ans if not qid_to_has_ans[k])
    cur_score = num_no_ans
    best_score = cur_score
    best_thresh = 0.0
    qid_list = sorted(na_probs, key=lambda k: na_probs[k])
    for i, qid in enumerate(qid_list):
        if qid not in scores:
            continue
        if qid_to_has_ans[qid]:
            diff = scores[qid]
        else:
            if preds[qid]:
                diff = -1
            else:
                diff = 0
        cur_score += diff
        if cur_score > best_score:
            best_score = cur_score
            best_thresh = na_probs[qid]
    return 100.0 * best_score / len(scores), best_thresh
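

# Tiny illustrative run (hypothetical data, added for clarity): two answerable
# questions and one unanswerable one. Questions are switched on in order of
# increasing no-answer probability, and the cut maximizing the score is kept.
_preds = {"q1": "foo", "q2": "bar", "q3": ""}
_scores = {"q1": 1, "q2": 0, "q3": 1}
_na_probs = {"q1": 0.1, "q2": 0.2, "q3": 0.9}
_has_ans = {"q1": True, "q2": True, "q3": False}
assert find_best_thresh(_preds, _scores, _na_probs, _has_ans) == (100.0 * 2 / 3, 0.1)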
def find_all_best_thresh(main_eval, preds, exact_raw, f1_raw, na_probs, qid_to_has_ans):
    best_exact, exact_thresh = find_best_thresh(preds, exact_raw, na_probs, qid_to_has_ans)
    best_f1, f1_thresh = find_best_thresh(preds, f1_raw, na_probs, qid_to_has_ans)
    main_eval["best_exact"] = best_exact
    main_eval["best_exact_thresh"] = exact_thresh
    main_eval["best_f1"] = best_f1
    main_eval["best_f1_thresh"] = f1_thresh


def main():
    with open(OPTS.data_file) as f:
        dataset_json = json.load(f)
        dataset = dataset_json["data"]
    with open(OPTS.pred_file) as f:
        preds = json.load(f)
    if OPTS.na_prob_file:
        with open(OPTS.na_prob_file) as f:
            na_probs = json.load(f)
    else:
        na_probs = {k: 0.0 for k in preds}
    qid_to_has_ans = make_qid_to_has_ans(dataset)  # maps qid to True/False
    has_ans_qids = [k for k, v in qid_to_has_ans.items() if v]
    no_ans_qids = [k for k, v in qid_to_has_ans.items() if not v]
    exact_raw, f1_raw = get_raw_scores(dataset, preds)
    exact_thresh = apply_no_ans_threshold(exact_raw, na_probs, qid_to_has_ans, OPTS.na_prob_thresh)
    f1_thresh = apply_no_ans_threshold(f1_raw, na_probs, qid_to_has_ans, OPTS.na_prob_thresh)
    out_eval = make_eval_dict(exact_thresh, f1_thresh)
    if has_ans_qids:
        has_ans_eval = make_eval_dict(exact_thresh, f1_thresh, qid_list=has_ans_qids)
        merge_eval(out_eval, has_ans_eval, "HasAns")
    if no_ans_qids:
        no_ans_eval = make_eval_dict(exact_thresh, f1_thresh, qid_list=no_ans_qids)
        merge_eval(out_eval, no_ans_eval, "NoAns")
    if OPTS.na_prob_file:
        find_all_best_thresh(out_eval, preds, exact_raw, f1_raw, na_probs, qid_to_has_ans)
    if OPTS.na_prob_file and OPTS.out_image_dir:
        run_precision_recall_analysis(out_eval, exact_raw, f1_raw, na_probs, qid_to_has_ans, OPTS.out_image_dir)
        histogram_na_prob(na_probs, has_ans_qids, OPTS.out_image_dir, "hasAns")
        histogram_na_prob(na_probs, no_ans_qids, OPTS.out_image_dir, "noAns")
    if OPTS.out_file:
        with open(OPTS.out_file, "w") as f:
            json.dump(out_eval, f)
    else:
        print(json.dumps(out_eval, indent=2))


if __name__ == "__main__":
    OPTS = parse_args()
    if OPTS.out_image_dir:
        import matplotlib

        matplotlib.use("Agg")
        import matplotlib.pyplot as plt
    main()
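
# Example invocation (the script filename is a hypothetical placeholder):
#   python evaluate_squad_v2.py dev-v2.0.json predictions.json \
#       --na-prob-file na_prob.json --na-prob-thresh 0.5 -o eval.json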
| 77 | 1 |
deps = {
'Pillow': 'Pillow<10.0.0',
'accelerate': 'accelerate>=0.20.3',
'av': 'av==9.2.0',
'beautifulsoup4': 'beautifulsoup4',
'black': 'black~=23.1',
'codecarbon': 'codecarbon==1.2.0',
'cookiecutter': 'cookiecutter==1.7.3',
'dataclasses': 'dataclasses',
'datasets': 'datasets!=2.5.0',
'decord': 'decord==0.6.0',
'deepspeed': 'deepspeed>=0.9.3',
'diffusers': 'diffusers',
'dill': 'dill<0.3.5',
'evaluate': 'evaluate>=0.2.0',
'fairscale': 'fairscale>0.3',
'faiss-cpu': 'faiss-cpu',
'fastapi': 'fastapi',
'filelock': 'filelock',
'flax': 'flax>=0.4.1,<=0.7.0',
'ftfy': 'ftfy',
'fugashi': 'fugashi>=1.0',
'GitPython': 'GitPython<3.1.19',
'hf-doc-builder': 'hf-doc-builder>=0.3.0',
'huggingface-hub': 'huggingface-hub>=0.14.1,<1.0',
'importlib_metadata': 'importlib_metadata',
'ipadic': 'ipadic>=1.0.0,<2.0',
'isort': 'isort>=5.5.4',
'jax': 'jax>=0.2.8,!=0.3.2,<=0.4.13',
'jaxlib': 'jaxlib>=0.1.65,<=0.4.13',
'jieba': 'jieba',
'kenlm': 'kenlm',
'keras-nlp': 'keras-nlp>=0.3.1',
'librosa': 'librosa',
'nltk': 'nltk',
'natten': 'natten>=0.14.6',
'numpy': 'numpy>=1.17',
'onnxconverter-common': 'onnxconverter-common',
'onnxruntime-tools': 'onnxruntime-tools>=1.4.2',
'onnxruntime': 'onnxruntime>=1.4.0',
'opencv-python': 'opencv-python',
'optuna': 'optuna',
'optax': 'optax>=0.0.8,<=0.1.4',
'packaging': 'packaging>=20.0',
'parameterized': 'parameterized',
'phonemizer': 'phonemizer',
'protobuf': 'protobuf',
'psutil': 'psutil',
'pyyaml': 'pyyaml>=5.1',
'pydantic': 'pydantic<2',
'pytest': 'pytest>=7.2.0',
'pytest-timeout': 'pytest-timeout',
'pytest-xdist': 'pytest-xdist',
'python': 'python>=3.8.0',
'ray[tune]': 'ray[tune]',
'regex': 'regex!=2019.12.17',
'requests': 'requests',
'rhoknp': 'rhoknp>=1.1.0,<1.3.1',
'rjieba': 'rjieba',
'rouge-score': 'rouge-score!=0.0.7,!=0.0.8,!=0.1,!=0.1.1',
'ruff': 'ruff>=0.0.241,<=0.0.259',
'sacrebleu': 'sacrebleu>=1.4.12,<2.0.0',
'sacremoses': 'sacremoses',
'safetensors': 'safetensors>=0.3.1',
'sagemaker': 'sagemaker>=2.31.0',
'scikit-learn': 'scikit-learn',
'sentencepiece': 'sentencepiece>=0.1.91,!=0.1.92',
'sigopt': 'sigopt',
'starlette': 'starlette',
'sudachipy': 'sudachipy>=0.6.6',
'sudachidict_core': 'sudachidict_core>=20220729',
'tensorflow-cpu': 'tensorflow-cpu>=2.6,<2.14',
'tensorflow': 'tensorflow>=2.6,<2.14',
'tensorflow-text': 'tensorflow-text<2.14',
'tf2onnx': 'tf2onnx',
'timeout-decorator': 'timeout-decorator',
'timm': 'timm',
'tokenizers': 'tokenizers>=0.11.1,!=0.11.3,<0.14',
'torch': 'torch>=1.9,!=1.12.0',
'torchaudio': 'torchaudio',
'torchvision': 'torchvision',
'pyctcdecode': 'pyctcdecode>=0.4.0',
'tqdm': 'tqdm>=4.27',
'unidic': 'unidic>=1.0.2',
'unidic_lite': 'unidic_lite>=1.0.7',
'urllib3': 'urllib3<2.0.0',
'uvicorn': 'uvicorn',
}
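

# A minimal sketch (added for illustration) of how a pin table like `deps` is
# typically consumed; the helper name below is an assumption, not part of the
# original table.
def get_pinned_requirement(package: str) -> str:
    """Return the version-pinned requirement string for `package`."""
    if package not in deps:
        raise ValueError(f"Unknown dependency: {package}")
    return deps[package]


# e.g. get_pinned_requirement("tokenizers") == "tokenizers>=0.11.1,!=0.11.3,<0.14"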
| 355 |
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re
from ..utils import cached_file
# docstyle-ignore
CHAT_MESSAGE_PROMPT = "\nHuman: <<task>>\n\nAssistant: "

DEFAULT_PROMPTS_REPO = "huggingface-tools/default-prompts"
PROMPT_FILES = {"chat": "chat_prompt_template.txt", "run": "run_prompt_template.txt"}
def download_prompt(prompt_or_repo_id, agent_name, mode="run"):
    """Download and cache a prompt from the Hub, or return a literal prompt unchanged."""
    if prompt_or_repo_id is None:
        prompt_or_repo_id = DEFAULT_PROMPTS_REPO

    # prompt is considered a repo ID when it does not contain any kind of space
    if re.search("\\s", prompt_or_repo_id) is not None:
        return prompt_or_repo_id

    prompt_file = cached_file(
        prompt_or_repo_id, PROMPT_FILES[mode], repo_type="dataset", user_agent={"agent": agent_name}
    )
    with open(prompt_file, "r", encoding="utf-8") as f:
        return f.read()
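

# Illustrative usage (the agent name is an assumption): fetch the default
# "run" template from the Hub, or pass a literal prompt straight through.
#
#   template = download_prompt(None, agent_name="my-agent", mode="run")
#   literal = download_prompt("Answer the question: <<task>>", agent_name="my-agent")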
| 247 | 0 |