from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
logger = logging.get_logger(__name__)

TABLE_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'microsoft/table-transformer-detection': (
'https://huggingface.co/microsoft/table-transformer-detection/resolve/main/config.json'
),
}
class TableTransformerConfig(PretrainedConfig):
    model_type = "table-transformer"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "hidden_size": "d_model",
        "num_attention_heads": "encoder_attention_heads",
    }

    def __init__(
        self,
        use_timm_backbone=True,
        backbone_config=None,
        num_channels=3,
        num_queries=100,
        encoder_layers=6,
        encoder_ffn_dim=2048,
        encoder_attention_heads=8,
        decoder_layers=6,
        decoder_ffn_dim=2048,
        decoder_attention_heads=8,
        encoder_layerdrop=0.0,
        decoder_layerdrop=0.0,
        is_encoder_decoder=True,
        activation_function="relu",
        d_model=256,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        init_xavier_std=1.0,
        auxiliary_loss=False,
        position_embedding_type="sine",
        backbone="resnet50",
        use_pretrained_backbone=True,
        dilation=False,
        class_cost=1,
        bbox_cost=5,
        giou_cost=2,
        mask_loss_coefficient=1,
        dice_loss_coefficient=1,
        bbox_loss_coefficient=5,
        giou_loss_coefficient=2,
        eos_coefficient=0.1,
        **kwargs,
    ):
        if backbone_config is not None and use_timm_backbone:
            raise ValueError("You can't specify both `backbone_config` and `use_timm_backbone`.")

        if not use_timm_backbone:
            if backbone_config is None:
                logger.info("`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.")
                backbone_config = CONFIG_MAPPING["resnet"](out_features=["stage4"])
            elif isinstance(backbone_config, dict):
                backbone_model_type = backbone_config.get("model_type")
                config_class = CONFIG_MAPPING[backbone_model_type]
                backbone_config = config_class.from_dict(backbone_config)
            # set timm attributes to None
            dilation, backbone, use_pretrained_backbone = None, None, None

        self.use_timm_backbone = use_timm_backbone
        self.backbone_config = backbone_config
        self.num_channels = num_channels
        self.num_queries = num_queries
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.init_xavier_std = init_xavier_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.num_hidden_layers = encoder_layers
        self.auxiliary_loss = auxiliary_loss
        self.position_embedding_type = position_embedding_type
        self.backbone = backbone
        self.use_pretrained_backbone = use_pretrained_backbone
        self.dilation = dilation
        # Hungarian matcher
        self.class_cost = class_cost
        self.bbox_cost = bbox_cost
        self.giou_cost = giou_cost
        # Loss coefficients
        self.mask_loss_coefficient = mask_loss_coefficient
        self.dice_loss_coefficient = dice_loss_coefficient
        self.bbox_loss_coefficient = bbox_loss_coefficient
        self.giou_loss_coefficient = giou_loss_coefficient
        self.eos_coefficient = eos_coefficient
        super().__init__(is_encoder_decoder=is_encoder_decoder, **kwargs)
    @property
    def num_attention_heads(self) -> int:
        return self.encoder_attention_heads

    @property
    def hidden_size(self) -> int:
        return self.d_model
class TableTransformerOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
                ("pixel_mask", {0: "batch"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-5

    @property
    def default_onnx_opset(self) -> int:
        return 12
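# Added usage sketch (illustrative only; the relative imports above mean this
# module runs inside the transformers package, so the guard below would only
# fire if the file were executed in that context):
if __name__ == "__main__":
    cfg = TableTransformerConfig()
    print(cfg.hidden_size)           # 256, aliased to d_model via attribute_map
    print(cfg.num_attention_heads)   # 8, aliased to encoder_attention_heads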
"""simple docstring"""
from __future__ import annotations
def kmp(pattern: str, text: str) -> bool:
    # 1) Construct the failure array for the pattern
    failure = get_failure_array(pattern)

    # 2) Step through text searching for pattern
    i, j = 0, 0  # index into text, pattern
    while i < len(text):
        if pattern[j] == text[i]:
            if j == (len(pattern) - 1):
                return True
            j += 1

        # if this is a prefix in our pattern
        # just go back far enough to continue
        elif j > 0:
            j = failure[j - 1]
            continue
        i += 1
    return False
def get_failure_array(pattern: str) -> list[int]:
    failure = [0]
    i = 0
    j = 1
    while j < len(pattern):
        if pattern[i] == pattern[j]:
            i += 1
        elif i > 0:
            i = failure[i - 1]
            continue
        j += 1
        failure.append(i)
    return failure
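# Added illustration: how the failure array drives the skip in `kmp`. For the
# pattern "aabaab", get_failure_array returns [0, 1, 0, 1, 2, 3]; on a mismatch
# at j == 5 the search resumes at j = failure[4] == 2 instead of restarting at 0.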
if __name__ == "__main__":
# Test 1)
__SCREAMING_SNAKE_CASE : Optional[int] = 'abc1abc12'
__SCREAMING_SNAKE_CASE : Optional[int] = 'alskfjaldsabc1abc1abc12k23adsfabcabc'
__SCREAMING_SNAKE_CASE : List[str] = 'alskfjaldsk23adsfabcabc'
assert kmp(pattern, texta) and not kmp(pattern, texta)
# Test 2)
__SCREAMING_SNAKE_CASE : int = 'ABABX'
__SCREAMING_SNAKE_CASE : Optional[Any] = 'ABABZABABYABABX'
assert kmp(pattern, text)
# Test 3)
__SCREAMING_SNAKE_CASE : Any = 'AAAB'
__SCREAMING_SNAKE_CASE : List[Any] = 'ABAAAAAB'
assert kmp(pattern, text)
# Test 4)
__SCREAMING_SNAKE_CASE : Optional[int] = 'abcdabcy'
__SCREAMING_SNAKE_CASE : str = 'abcxabcdabxabcdabcdabcy'
assert kmp(pattern, text)
# Test 5)
__SCREAMING_SNAKE_CASE : Any = 'aabaabaaa'
assert get_failure_array(pattern) == [0, 1, 0, 1, 2, 3, 4, 5, 2]
import tempfile
import unittest
from transformers import T5Config, is_torch_available
from transformers.testing_utils import (
require_sentencepiece,
require_tokenizers,
require_torch,
slow,
torch_device,
)
from ...generation.test_utils import GenerationTesterMixin
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
    from transformers import AutoTokenizer, UMT5ForConditionalGeneration, UMT5ForQuestionAnswering, UMT5Model
class UMT5ModelTester:
    def __init__(
        self,
        parent,
        vocab_size=99,
        batch_size=13,
        encoder_seq_length=7,
        decoder_seq_length=9,
        is_training=True,
        use_attention_mask=True,
        use_labels=False,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        d_ff=37,
        relative_attention_num_buckets=8,
        dropout_rate=0.1,
        initializer_factor=0.002,
        eos_token_id=1,
        pad_token_id=0,
        decoder_start_token_id=0,
        scope=None,
        decoder_layers=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.encoder_seq_length = encoder_seq_length
        self.decoder_seq_length = decoder_seq_length
        # For common tests
        self.seq_length = self.decoder_seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.d_ff = d_ff
        self.relative_attention_num_buckets = relative_attention_num_buckets
        self.dropout_rate = dropout_rate
        self.initializer_factor = initializer_factor
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.decoder_start_token_id = decoder_start_token_id
        self.scope = None
        self.decoder_layers = decoder_layers
    def get_large_model_config(self):
        return T5Config.from_pretrained("google/umt5-base")

    def prepare_inputs_dict(
        self,
        config,
        input_ids,
        decoder_input_ids,
        attention_mask=None,
        decoder_attention_mask=None,
        head_mask=None,
        decoder_head_mask=None,
        cross_attn_head_mask=None,
    ):
        if attention_mask is None:
            attention_mask = input_ids.ne(config.pad_token_id)
        if decoder_attention_mask is None:
            decoder_attention_mask = decoder_input_ids.ne(config.pad_token_id)
        if head_mask is None:
            head_mask = torch.ones(config.num_hidden_layers, config.num_attention_heads, device=torch_device)
        if decoder_head_mask is None:
            decoder_head_mask = torch.ones(config.num_decoder_layers, config.num_attention_heads, device=torch_device)
        if cross_attn_head_mask is None:
            cross_attn_head_mask = torch.ones(
                config.num_decoder_layers, config.num_attention_heads, device=torch_device
            )
        return {
            "input_ids": input_ids,
            "decoder_input_ids": decoder_input_ids,
            "attention_mask": attention_mask,
            "decoder_attention_mask": decoder_attention_mask,
            "head_mask": head_mask,
            "decoder_head_mask": decoder_head_mask,
            "cross_attn_head_mask": cross_attn_head_mask,
        }
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.encoder_seq_length], self.vocab_size)
        decoder_input_ids = ids_tensor([self.batch_size, self.decoder_seq_length], self.vocab_size)
        # we need to clamp the input ids here to avoid having pad token in between
        # this is because for NllbMoe the position_ids are prepared such that
        # all pad tokens have pos id = 2 and rest are between 2..seq_length
        # and the seq_length here is seq_length - num_pad_tokens
        # but when using past, there is no way of knowing if the past input ids had
        # pad tokens in them, which results in incorrect seq_length and which in turn results in
        # position_ids being off by num_pad_tokens in past input
        input_ids = input_ids.clamp(self.pad_token_id + 1)
        decoder_input_ids = decoder_input_ids.clamp(self.pad_token_id + 1)

        config = self.get_config()
        config.encoder_attention_heads = config.num_attention_heads
        input_dict = self.prepare_inputs_dict(config, input_ids, decoder_input_ids)
        return config, input_dict

    def prepare_config_and_inputs_for_common(self):
        config, inputs_dict = self.prepare_config_and_inputs()
        return config, inputs_dict

    def get_pipeline_config(self):
        return T5Config(
            vocab_size=166,
            d_model=self.hidden_size,
            d_ff=self.d_ff,
            d_kv=self.hidden_size // self.num_attention_heads,
            num_layers=self.num_hidden_layers,
            num_decoder_layers=self.decoder_layers,
            num_heads=self.num_attention_heads,
            relative_attention_num_buckets=self.relative_attention_num_buckets,
            dropout_rate=self.dropout_rate,
            initializer_factor=self.initializer_factor,
            eos_token_id=self.eos_token_id,
            bos_token_id=self.pad_token_id,
            pad_token_id=self.pad_token_id,
            decoder_start_token_id=self.decoder_start_token_id,
        )

    def get_config(self):
        return T5Config(
            vocab_size=self.vocab_size,
            d_model=self.hidden_size,
            d_ff=self.d_ff,
            d_kv=self.hidden_size // self.num_attention_heads,
            num_layers=self.num_hidden_layers,
            num_decoder_layers=self.decoder_layers,
            num_heads=self.num_attention_heads,
            relative_attention_num_buckets=self.relative_attention_num_buckets,
            dropout_rate=self.dropout_rate,
            initializer_factor=self.initializer_factor,
            eos_token_id=self.eos_token_id,
            bos_token_id=self.pad_token_id,
            pad_token_id=self.pad_token_id,
            decoder_start_token_id=self.decoder_start_token_id,
        )
    def create_and_check_model(
        self,
        config,
        input_ids,
        decoder_input_ids,
        attention_mask,
        decoder_attention_mask,
        lm_labels,
    ):
        model = UMT5Model(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids=input_ids,
            decoder_input_ids=decoder_input_ids,
            attention_mask=attention_mask,
            decoder_attention_mask=decoder_attention_mask,
        )
        result = model(input_ids=input_ids, decoder_input_ids=decoder_input_ids)
        decoder_output = result.last_hidden_state
        decoder_past = result.past_key_values
        encoder_output = result.encoder_last_hidden_state

        self.parent.assertEqual(encoder_output.size(), (self.batch_size, self.encoder_seq_length, self.hidden_size))
        self.parent.assertEqual(decoder_output.size(), (self.batch_size, self.decoder_seq_length, self.hidden_size))
        # There should be `num_layers` key value embeddings stored in decoder_past
        self.parent.assertEqual(len(decoder_past), config.num_layers)
        # There should be a self attn key, a self attn value, a cross attn key and a cross attn value stored in each decoder_past tuple
        self.parent.assertEqual(len(decoder_past[0]), 4)
    def create_and_check_decoder_model_past(
        self,
        config,
        input_ids,
        decoder_input_ids,
        attention_mask,
        decoder_attention_mask,
        lm_labels,
    ):
        model = UMT5Model(config=config).get_decoder().to(torch_device).eval()
        # first forward pass
        outputs = model(input_ids, use_cache=True)
        outputs_use_cache_conf = model(input_ids)
        outputs_no_past = model(input_ids, use_cache=False)

        self.parent.assertTrue(len(outputs) == len(outputs_use_cache_conf))
        self.parent.assertTrue(len(outputs) == len(outputs_no_past) + 1)

        output, past_key_values = outputs.to_tuple()

        # create hypothetical next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 1), config.vocab_size)

        # append to next input_ids and
        next_input_ids = torch.cat([input_ids, next_tokens], dim=-1)

        output_from_no_past = model(next_input_ids)["last_hidden_state"]
        output_from_past = model(next_tokens, past_key_values=past_key_values)["last_hidden_state"]

        # select random slice
        random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item()
        output_from_no_past_slice = output_from_no_past[:, -1, random_slice_idx].detach()
        output_from_past_slice = output_from_past[:, 0, random_slice_idx].detach()

        # test that outputs are equal for slice
        self.parent.assertTrue(torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-3))
    def create_and_check_model_fp16_forward(
        self,
        config,
        input_dict,
    ):
        model = UMT5Model(config=config).to(torch_device).half().eval()
        output = model(**input_dict)["last_hidden_state"]
        self.parent.assertFalse(torch.isnan(output).any().item())
@require_torch
class UMT5ModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (UMT5Model, UMT5ForConditionalGeneration, UMT5ForQuestionAnswering) if is_torch_available() else ()
    )
    all_generative_model_classes = (UMT5ForConditionalGeneration,) if is_torch_available() else ()
    pipeline_model_mapping = (
        {
            "conversational": UMT5ForConditionalGeneration,
            "feature-extraction": UMT5Model,
            "summarization": UMT5ForConditionalGeneration,
            "text2text-generation": UMT5ForConditionalGeneration,
            "translation": UMT5ForConditionalGeneration,
            "question-answering": UMT5ForQuestionAnswering,
        }
        if is_torch_available()
        else {}
    )
    is_encoder_decoder = True
    fx_compatible = False
    test_pruning = False
    test_missing_keys = True
    test_torchscript = True
    # The small UMT5 model needs higher percentages for CPU/MP tests
    model_split_percents = [0.8, 0.9]

    def setUp(self):
        self.model_tester = UMT5ModelTester(self)
@unittest.skip("""Test has a segmentation fault on torch 1.8.0""" )
def a_ ( self : Optional[int] ) -> str:
"""simple docstring"""
A__ = self.model_tester.prepare_config_and_inputs()
A__ = UMTaModel(config_and_inputs[0] ).to(__lowerCAmelCase )
with tempfile.TemporaryDirectory() as tmpdirname:
torch.onnx.export(
__lowerCAmelCase , (config_and_inputs[1], config_and_inputs[3], config_and_inputs[2]) , f'{tmpdirname}/t5_test.onnx' , export_params=__lowerCAmelCase , opset_version=9 , input_names=["""input_ids""", """decoder_input_ids"""] , )
@unittest.skipIf(torch_device == """cpu""" , """Cant do half precision""" )
def a_ ( self : Tuple ) -> List[Any]:
"""simple docstring"""
A__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model_fpaa_forward(*__lowerCAmelCase )
    def test_generate_with_head_masking(self):
        attention_names = ["encoder_attentions", "decoder_attentions", "cross_attentions"]
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        config = config_and_inputs[0]
        model = UMT5ForConditionalGeneration(config).eval()
        model.to(torch_device)

        head_masking = {
            "head_mask": torch.zeros(config.num_layers, config.num_heads, device=torch_device),
            "decoder_head_mask": torch.zeros(config.num_decoder_layers, config.num_heads, device=torch_device),
            "cross_attn_head_mask": torch.zeros(config.num_decoder_layers, config.num_heads, device=torch_device),
        }

        for attn_name, (name, mask) in zip(attention_names, head_masking.items()):
            head_masks = {name: mask}
            # Explicitly pass decoder_head_mask as it is required from T5 model when head_mask specified
            if name == "head_mask":
                head_masks["decoder_head_mask"] = torch.ones(
                    config.num_decoder_layers, config.num_heads, device=torch_device
                )

            out = model.generate(
                config_and_inputs[1]["input_ids"],
                num_beams=1,
                max_length=3,
                output_attentions=True,
                return_dict_in_generate=True,
                **head_masks,
            )
            # We check the state of decoder_attentions and cross_attentions just from the last step
            attn_weights = out[attn_name] if attn_name == attention_names[0] else out[attn_name][-1]
            self.assertEqual(sum([w.sum().item() for w in attn_weights]), 0.0)
@unittest.skip("""Does not work on the tiny model as we keep hitting edge cases.""" )
def a_ ( self : List[Any] ) -> Optional[int]:
"""simple docstring"""
pass
@require_torch
@require_sentencepiece
@require_tokenizers
class Umt5IntegrationTest(unittest.TestCase):
    @slow
    @unittest.skip(
        "Unless we stop stripping left and right by default for all special tokens, the expected ids obtained here will not match the original ones. Wait for https://github.com/huggingface/transformers/pull/23909 to be merged"
    )
    def test_small_integration_test(self):
        model = UMT5ForConditionalGeneration.from_pretrained("google/umt5-small", return_dict=True).to(torch_device)
        tokenizer = AutoTokenizer.from_pretrained("google/umt5-small", use_fast=False, legacy=False)
        input_text = [
"""Bonjour monsieur <extra_id_0> bien <extra_id_1>.""",
"""No se como puedo <extra_id_0>.""",
"""This is the reason why we <extra_id_0> them.""",
"""The <extra_id_0> walks in <extra_id_1>, seats""",
"""A <extra_id_0> walks into a bar and orders a <extra_id_1> with <extra_id_2> pinch of <extra_id_3>.""",
]
        input_ids = tokenizer(input_text, return_tensors="pt", padding=True).input_ids
# fmt: off
        EXPECTED_IDS = torch.tensor(
            [
                [38530, 210703, 256299, 1410, 256298, 274, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
                [826, 321, 671, 25922, 256299, 274, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
                [1460, 339, 312, 19014, 10620, 758, 256299, 2355, 274, 1, 0, 0, 0, 0, 0, 0, 0, 0],
                [517, 256299, 14869, 281, 301, 256298, 275, 119983, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0],
                [320, 256299, 14869, 281, 2234, 289, 2275, 333, 61391, 289, 256298, 543, 256297, 168714, 329, 256296, 274, 1],
            ]
        )
# fmt: on
        torch.testing.assert_allclose(input_ids, EXPECTED_IDS)

        generated_ids = model.generate(input_ids.to(torch_device))
        EXPECTED_FILLING = [
"""<pad><extra_id_0> et<extra_id_1> [eod] <extra_id_2><extra_id_55>.. [eod] 💐 💐 💐 💐 💐 💐 💐 💐 💐 💐 💐 <extra_id_56>ajšietosto<extra_id_56>lleux<extra_id_19><extra_id_6>ajšie</s>""",
"""<pad><extra_id_0>.<extra_id_1>.,<0x0A>...spech <0x0A><extra_id_20> <extra_id_21></s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>""",
"""<pad><extra_id_0> are not going to be a part of the world. We are not going to be a part of<extra_id_1> and<extra_id_2><0x0A><extra_id_48>.<extra_id_48></s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>""",
"""<pad><extra_id_0> door<extra_id_1>, the door<extra_id_2> 피해[/</s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>""",
"""<pad><extra_id_0>nyone who<extra_id_1> drink<extra_id_2> a<extra_id_3> alcohol<extra_id_4> A<extra_id_5> A. This<extra_id_6> I<extra_id_7><extra_id_52><extra_id_53></s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>""",
]
        filling = tokenizer.batch_decode(generated_ids)
        self.assertEqual(filling, EXPECTED_FILLING)
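# Added sketch of the same flow outside the test harness (model name as used in
# the integration test above; the decoded output is illustrative, not asserted):
#
#   tokenizer = AutoTokenizer.from_pretrained("google/umt5-small")
#   model = UMT5ForConditionalGeneration.from_pretrained("google/umt5-small")
#   ids = tokenizer("A <extra_id_0> walks into a bar.", return_tensors="pt").input_ids
#   print(tokenizer.batch_decode(model.generate(ids)))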
import unittest
import numpy as np
def schur_complement(
    mat_a: np.ndarray,
    mat_b: np.ndarray,
    mat_c: np.ndarray,
    pseudo_inv: np.ndarray | None = None,
) -> np.ndarray:
    shape_a = np.shape(mat_a)
    shape_b = np.shape(mat_b)
    shape_c = np.shape(mat_c)

    if shape_a[0] != shape_b[0]:
        msg = (
            "Expected the same number of rows for A and B. "
            f"Instead found A of size {shape_a} and B of size {shape_b}"
        )
        raise ValueError(msg)

    if shape_b[1] != shape_c[1]:
        msg = (
            "Expected the same number of columns for B and C. "
            f"Instead found B of size {shape_b} and C of size {shape_c}"
        )
        raise ValueError(msg)

    a_inv = pseudo_inv
    if a_inv is None:
        try:
            a_inv = np.linalg.inv(mat_a)
        except np.linalg.LinAlgError:
            raise ValueError("Input matrix A is not invertible. Cannot compute Schur complement.")

    return mat_c - mat_b.T @ a_inv @ mat_b
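# Added note: for the symmetric block matrix [[A, B], [B^T, C]], the function
# above returns the Schur complement S = C - B^T A^(-1) B. A standard identity,
# det([[A, B], [B^T, C]]) = det(A) * det(S), is what the first test below checks.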
class TestSchurComplement(unittest.TestCase):
    def test_schur_complement(self) -> None:
        a = np.array([[1, 2, 1], [2, 1, 2], [3, 2, 4]])
        b = np.array([[0, 3], [3, 0], [2, 3]])
        c = np.array([[2, 1], [6, 3]])
        s = schur_complement(a, b, c)

        input_matrix = np.block([[a, b], [b.T, c]])

        det_x = np.linalg.det(input_matrix)
        det_a = np.linalg.det(a)
        det_s = np.linalg.det(s)

        self.assertAlmostEqual(det_x, det_a * det_s)

    def test_improper_a_b_dimensions(self) -> None:
        a = np.array([[1, 2, 1], [2, 1, 2], [3, 2, 4]])
        b = np.array([[0, 3], [3, 0], [2, 3]])
        c = np.array([[2, 1], [6, 3]])

        with self.assertRaises(ValueError):
            schur_complement(b, a, c)

    def test_improper_b_c_dimensions(self) -> None:
        a = np.array([[1, 2, 1], [2, 1, 2], [3, 2, 4]])
        b = np.array([[0, 3], [3, 0], [2, 3]])
        c = np.array([[2, 1, 3], [6, 3, 5]])

        with self.assertRaises(ValueError):
            schur_complement(a, b, c)
if __name__ == "__main__":
import doctest
doctest.testmod()
unittest.main()
# fmt: off
MORSE_CODE_DICT = {
"A": ".-", "B": "-...", "C": "-.-.", "D": "-..", "E": ".", "F": "..-.", "G": "--.",
"H": "....", "I": "..", "J": ".---", "K": "-.-", "L": ".-..", "M": "--", "N": "-.",
"O": "---", "P": ".--.", "Q": "--.-", "R": ".-.", "S": "...", "T": "-", "U": "..-",
"V": "...-", "W": ".--", "X": "-..-", "Y": "-.--", "Z": "--..", "1": ".----",
"2": "..---", "3": "...--", "4": "....-", "5": ".....", "6": "-....", "7": "--...",
"8": "---..", "9": "----.", "0": "-----", "&": ".-...", "@": ".--.-.",
":": "---...", ",": "--..--", ".": ".-.-.-", "'": ".----.", "\"": ".-..-.",
"?": "..--..", "/": "-..-.", "=": "-...-", "+": ".-.-.", "-": "-....-",
"(": "-.--.", ")": "-.--.-", "!": "-.-.--", " ": "/"
} # Exclamation mark is not in ITU-R recommendation
# fmt: on
REVERSE_DICT = {value: key for key, value in MORSE_CODE_DICT.items()}
def encrypt(message: str) -> str:
    return " ".join(MORSE_CODE_DICT[char] for char in message.upper())


def decrypt(message: str) -> str:
    return "".join(REVERSE_DICT[char] for char in message.split())
def main() -> None:
    message = "Morse code here!"
    print(message)
    message = encrypt(message)
    print(message)
    message = decrypt(message)
    print(message)
if __name__ == "__main__":
main()
import argparse
import torch
from transformers import OpenAIGPTConfig, OpenAIGPTModel, load_tf_weights_in_openai_gpt
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging
logging.set_verbosity_info()
def convert_openai_checkpoint_to_pytorch(openai_checkpoint_folder_path, openai_config_file, pytorch_dump_folder_path):
    # Construct model
    if openai_config_file == "":
        config = OpenAIGPTConfig()
    else:
        config = OpenAIGPTConfig.from_json_file(openai_config_file)
    model = OpenAIGPTModel(config)

    # Load weights from numpy
    load_tf_weights_in_openai_gpt(model, config, openai_checkpoint_folder_path)

    # Save pytorch-model
    pytorch_weights_dump_path = pytorch_dump_folder_path + "/" + WEIGHTS_NAME
    pytorch_config_dump_path = pytorch_dump_folder_path + "/" + CONFIG_NAME
    print(f"Save PyTorch model to {pytorch_weights_dump_path}")
    torch.save(model.state_dict(), pytorch_weights_dump_path)
    print(f"Save configuration file to {pytorch_config_dump_path}")
    with open(pytorch_config_dump_path, "w", encoding="utf-8") as f:
        f.write(config.to_json_string())
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--openai_checkpoint_folder_path",
default=None,
type=str,
required=True,
help="Path to the TensorFlow checkpoint path.",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
parser.add_argument(
"--openai_config_file",
default="",
type=str,
help=(
"An optional config json file corresponding to the pre-trained OpenAI model. \n"
"This specifies the model architecture."
),
)
    args = parser.parse_args()
convert_openai_checkpoint_to_pytorch(
args.openai_checkpoint_folder_path, args.openai_config_file, args.pytorch_dump_folder_path
)
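# Example invocation (script file name and paths are hypothetical):
#   python convert_openai_original_tf_checkpoint_to_pytorch.py \
#       --openai_checkpoint_folder_path ./openai-gpt-ckpt \
#       --pytorch_dump_folder_path ./openai-gpt-pytorch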
import unittest
from transformers import CamembertTokenizer, CamembertTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from transformers.utils import is_torch_available
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece.model")
SAMPLE_BPE_VOCAB = get_tests_dir("fixtures/test_sentencepiece_bpe.model")

FRAMEWORK = "pt" if is_torch_available() else "tf"
@require_sentencepiece
@require_tokenizers
class CamembertTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = CamembertTokenizer
    rust_tokenizer_class = CamembertTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True
    def setUp(self):
        super().setUp()

        # We have a SentencePiece fixture for testing
        tokenizer = CamembertTokenizer(SAMPLE_VOCAB)
        tokenizer.save_pretrained(self.tmpdirname)
    def test_convert_token_and_id(self):
        token = "<pad>"
        token_id = 1

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)
    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())

        self.assertEqual(vocab_keys[0], "<s>NOTUSED")
        self.assertEqual(vocab_keys[1], "<pad>")
        self.assertEqual(vocab_keys[-1], "<mask>")
        self.assertEqual(len(vocab_keys), 1004)
    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 1005)
    def test_rust_and_python_bpe_tokenizers(self):
        tokenizer = CamembertTokenizer(SAMPLE_BPE_VOCAB)
        tokenizer.save_pretrained(self.tmpdirname)
        rust_tokenizer = CamembertTokenizerFast.from_pretrained(self.tmpdirname)

        sequence = "I was born in 92000, and this is falsé."

        ids = tokenizer.encode(sequence)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)

        ids = tokenizer.encode(sequence, add_special_tokens=False)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)

        # <unk> tokens are not the same for `rust` than for `slow`.
        # Because spm gives back raw token instead of `unk` in EncodeAsPieces
        # tokens = tokenizer.tokenize(sequence)
        tokens = tokenizer.convert_ids_to_tokens(ids)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)
    def test_rust_and_python_full_tokenizers(self):
        if not self.test_rust_tokenizer:
            return

        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()

        sequence = "I was born in 92000, and this is falsé."

        tokens = tokenizer.tokenize(sequence)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)

        ids = tokenizer.encode(sequence, add_special_tokens=False)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)

        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)
    @slow
    def test_tokenizer_integration(self):
        # fmt: off
        expected_encoding = {'input_ids': [[5, 54, 7196, 297, 30, 23, 776, 18, 11, 3215, 3705, 8252, 22, 3164, 1181, 2116, 29, 16, 813, 25, 791, 3314, 20, 3446, 38, 27575, 120, 6, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [5, 468, 17, 11, 9088, 20, 1517, 8, 22804, 18818, 10, 38, 629, 607, 607, 142, 19, 7196, 867, 56, 10326, 24, 2267, 20, 416, 5072, 15612, 233, 734, 7, 2399, 27, 16, 3015, 1649, 7, 24, 20, 4338, 2399, 27, 13, 3400, 14, 13, 6189, 8, 930, 9, 6]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]}  # noqa: E501
        # fmt: on

        # camembert is a french model. So we also use french texts.
        sequences = [
            "Le transformeur est un modèle d'apprentissage profond introduit en 2017, "
            "utilisé principalement dans le domaine du traitement automatique des langues (TAL).",
            "À l'instar des réseaux de neurones récurrents (RNN), les transformeurs sont conçus "
            "pour gérer des données séquentielles, telles que le langage naturel, pour des tâches "
            "telles que la traduction et la synthèse de texte.",
        ]

        self.tokenizer_integration_test_util(
            expected_encoding=expected_encoding,
            model_name="camembert-base",
            revision="3a0641d9a1aeb7e848a74299e7e4c4bca216b4cf",
            sequences=sequences,
        )
from __future__ import annotations
from typing import Any
class Matrix:
    def __init__(self, row: int, column: int, default_value: float = 0) -> None:
        self.row, self.column = row, column
        self.array = [[default_value for _ in range(column)] for _ in range(row)]
    def __str__(self) -> str:
        s = f"Matrix consist of {self.row} rows and {self.column} columns\n"

        # Make string identifier
        max_element_length = 0
        for row_vector in self.array:
            for obj in row_vector:
                max_element_length = max(max_element_length, len(str(obj)))
        string_format_identifier = f"%{max_element_length}s"

        # Make string and return
        def single_line(row_vector: list[float]) -> str:
            nonlocal string_format_identifier
            line = "["
            line += ", ".join(string_format_identifier % (obj,) for obj in row_vector)
            line += "]"
            return line

        s += "\n".join(single_line(row_vector) for row_vector in self.array)
        return s
    def __repr__(self) -> str:
        return str(self)

    def validate_indices(self, loc: tuple[int, int]) -> bool:
        if not (isinstance(loc, (list, tuple)) and len(loc) == 2):
            return False
        elif not (0 <= loc[0] < self.row and 0 <= loc[1] < self.column):
            return False
        else:
            return True

    def __getitem__(self, loc: tuple[int, int]) -> Any:
        assert self.validate_indices(loc)
        return self.array[loc[0]][loc[1]]

    def __setitem__(self, loc: tuple[int, int], value: float) -> None:
        assert self.validate_indices(loc)
        self.array[loc[0]][loc[1]] = value
    def __add__(self, another: Matrix) -> Matrix:
        assert isinstance(another, Matrix)
        assert self.row == another.row and self.column == another.column

        # Add
        result = Matrix(self.row, self.column)
        for r in range(self.row):
            for c in range(self.column):
                result[r, c] = self[r, c] + another[r, c]
        return result

    def __neg__(self) -> Matrix:
        result = Matrix(self.row, self.column)
        for r in range(self.row):
            for c in range(self.column):
                result[r, c] = -self[r, c]
        return result

    def __sub__(self, another: Matrix) -> Matrix:
        return self + (-another)
    def __mul__(self, another: int | float | Matrix) -> Matrix:
        if isinstance(another, (int, float)):  # Scalar multiplication
            result = Matrix(self.row, self.column)
            for r in range(self.row):
                for c in range(self.column):
                    result[r, c] = self[r, c] * another
            return result
        elif isinstance(another, Matrix):  # Matrix multiplication
            assert self.column == another.row
            result = Matrix(self.row, another.column)
            for r in range(self.row):
                for c in range(another.column):
                    for i in range(self.column):
                        result[r, c] += self[r, i] * another[i, c]
            return result
        else:
            msg = f"Unsupported type given for another ({type(another)})"
            raise TypeError(msg)
    def transpose(self) -> Matrix:
        result = Matrix(self.column, self.row)
        for r in range(self.row):
            for c in range(self.column):
                result[c, r] = self[r, c]
        return result
    def sherman_morrison(self, u: Matrix, v: Matrix) -> Any:
        assert isinstance(u, Matrix) and isinstance(v, Matrix)
        assert self.row == self.column == u.row == v.row  # u, v should be column vector
        assert u.column == v.column == 1  # u, v should be column vector

        # Calculate
        v_t = v.transpose()
        numerator_factor = (v_t * self * u)[0, 0] + 1
        if numerator_factor == 0:
            return None  # It's not invertible
        return self - ((self * u) * (v_t * self) * (1.0 / numerator_factor))
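# Added note: `sherman_morrison` applies the Sherman-Morrison identity
#   (A + u v^T)^(-1) = A^(-1) - (A^(-1) u v^T A^(-1)) / (1 + v^T A^(-1) u),
# where `self` is assumed to already hold A^(-1), so the rank-one update
# needs no fresh matrix inversion.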
# Testing
if __name__ == "__main__":
def SCREAMING_SNAKE_CASE ( ) -> None:
"""simple docstring"""
A__ = Matrix(3 , 3 , 0 )
for i in range(3 ):
A__ = 1
print(f"""a^(-1) is {ainv}""" )
# u, v
A__ = Matrix(3 , 1 , 0 )
A__ , A__ , A__ = 1, 2, -3
A__ = Matrix(3 , 1 , 0 )
A__ , A__ , A__ = 4, -2, 5
print(f"""u is {u}""" )
print(f"""v is {v}""" )
print(f"""uv^T is {u * v.transpose()}""" )
# Sherman Morrison
print(f"""(a + uv^T)^(-1) is {ainv.sherman_morrison(lowercase_ , lowercase_ )}""" )
def SCREAMING_SNAKE_CASE ( ) -> None:
"""simple docstring"""
import doctest
doctest.testmod()
testa()
import argparse
import tensorflow as tf
import torch
from transformers import BertConfig, BertForMaskedLM
from transformers.models.bert.modeling_bert import (
BertIntermediate,
BertLayer,
BertOutput,
BertPooler,
BertSelfAttention,
BertSelfOutput,
)
from transformers.utils import logging
logging.set_verbosity_info()
def convert_checkpoint_to_pytorch(tf_checkpoint_path: str, config_path: str, pytorch_dump_path: str):
    def get_masked_lm_array(name: str):
        full_name = f"masked_lm/{name}/.ATTRIBUTES/VARIABLE_VALUE"
        array = tf.train.load_variable(tf_checkpoint_path, full_name)

        if "kernel" in name:
            array = array.transpose()

        return torch.from_numpy(array)

    def get_encoder_array(name: str):
        full_name = f"encoder/{name}/.ATTRIBUTES/VARIABLE_VALUE"
        array = tf.train.load_variable(tf_checkpoint_path, full_name)

        if "kernel" in name:
            array = array.transpose()

        return torch.from_numpy(array)

    def get_encoder_layer_array(layer_index: int, name: str):
        full_name = f"encoder/_transformer_layers/{layer_index}/{name}/.ATTRIBUTES/VARIABLE_VALUE"
        array = tf.train.load_variable(tf_checkpoint_path, full_name)

        if "kernel" in name:
            array = array.transpose()

        return torch.from_numpy(array)

    def get_encoder_attention_layer_array(layer_index: int, name: str, original_shape):
        full_name = f"encoder/_transformer_layers/{layer_index}/_attention_layer/{name}/.ATTRIBUTES/VARIABLE_VALUE"
        array = tf.train.load_variable(tf_checkpoint_path, full_name)
        array = array.reshape(original_shape)

        if "kernel" in name:
            array = array.transpose()

        return torch.from_numpy(array)

    print(f"Loading model based on config from {config_path}...")
    config = BertConfig.from_json_file(config_path)
    model = BertForMaskedLM(config)

    # Layers
    for layer_index in range(0, config.num_hidden_layers):
        layer: BertLayer = model.bert.encoder.layer[layer_index]

        # Self-attention
        self_attn: BertSelfAttention = layer.attention.self

        self_attn.query.weight.data = get_encoder_attention_layer_array(
            layer_index, "_query_dense/kernel", self_attn.query.weight.data.shape
        )
        self_attn.query.bias.data = get_encoder_attention_layer_array(
            layer_index, "_query_dense/bias", self_attn.query.bias.data.shape
        )
        self_attn.key.weight.data = get_encoder_attention_layer_array(
            layer_index, "_key_dense/kernel", self_attn.key.weight.data.shape
        )
        self_attn.key.bias.data = get_encoder_attention_layer_array(
            layer_index, "_key_dense/bias", self_attn.key.bias.data.shape
        )
        self_attn.value.weight.data = get_encoder_attention_layer_array(
            layer_index, "_value_dense/kernel", self_attn.value.weight.data.shape
        )
        self_attn.value.bias.data = get_encoder_attention_layer_array(
            layer_index, "_value_dense/bias", self_attn.value.bias.data.shape
        )

        # Self-attention Output
        self_output: BertSelfOutput = layer.attention.output

        self_output.dense.weight.data = get_encoder_attention_layer_array(
            layer_index, "_output_dense/kernel", self_output.dense.weight.data.shape
        )
        self_output.dense.bias.data = get_encoder_attention_layer_array(
            layer_index, "_output_dense/bias", self_output.dense.bias.data.shape
        )

        self_output.LayerNorm.weight.data = get_encoder_layer_array(layer_index, "_attention_layer_norm/gamma")
        self_output.LayerNorm.bias.data = get_encoder_layer_array(layer_index, "_attention_layer_norm/beta")

        # Intermediate
        intermediate: BertIntermediate = layer.intermediate

        intermediate.dense.weight.data = get_encoder_layer_array(layer_index, "_intermediate_dense/kernel")
        intermediate.dense.bias.data = get_encoder_layer_array(layer_index, "_intermediate_dense/bias")

        # Output
        bert_output: BertOutput = layer.output

        bert_output.dense.weight.data = get_encoder_layer_array(layer_index, "_output_dense/kernel")
        bert_output.dense.bias.data = get_encoder_layer_array(layer_index, "_output_dense/bias")

        bert_output.LayerNorm.weight.data = get_encoder_layer_array(layer_index, "_output_layer_norm/gamma")
        bert_output.LayerNorm.bias.data = get_encoder_layer_array(layer_index, "_output_layer_norm/beta")

    # Embeddings
    model.bert.embeddings.position_embeddings.weight.data = get_encoder_array("_position_embedding_layer/embeddings")
    model.bert.embeddings.token_type_embeddings.weight.data = get_encoder_array("_type_embedding_layer/embeddings")
    model.bert.embeddings.LayerNorm.weight.data = get_encoder_array("_embedding_norm_layer/gamma")
    model.bert.embeddings.LayerNorm.bias.data = get_encoder_array("_embedding_norm_layer/beta")

    # LM Head
    lm_head = model.cls.predictions.transform

    lm_head.dense.weight.data = get_masked_lm_array("dense/kernel")
    lm_head.dense.bias.data = get_masked_lm_array("dense/bias")

    lm_head.LayerNorm.weight.data = get_masked_lm_array("layer_norm/gamma")
    lm_head.LayerNorm.bias.data = get_masked_lm_array("layer_norm/beta")

    model.bert.embeddings.word_embeddings.weight.data = get_masked_lm_array("embedding_table")

    # Pooling
    model.bert.pooler = BertPooler(config=config)
    model.bert.pooler.dense.weight.data = get_encoder_array("_pooler_layer/kernel")
    model.bert.pooler.dense.bias.data = get_encoder_array("_pooler_layer/bias")

    # Export final model
    model.save_pretrained(pytorch_dump_path)

    # Integration test - should load without any errors ;)
    new_model = BertForMaskedLM.from_pretrained(pytorch_dump_path)
    print(new_model.eval())

    print("Model conversion was done successfully!")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
'--tf_checkpoint_path', type=str, required=True, help='Path to the TensorFlow Token Dropping checkpoint path.'
)
parser.add_argument(
'--bert_config_file',
type=str,
required=True,
help='The config json file corresponding to the BERT model. This specifies the model architecture.',
)
parser.add_argument(
'--pytorch_dump_path',
type=str,
required=True,
help='Path to the output PyTorch model.',
)
    args = parser.parse_args()
    convert_checkpoint_to_pytorch(args.tf_checkpoint_path, args.bert_config_file, args.pytorch_dump_path)
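# Example invocation (script file name and paths are hypothetical):
#   python convert_bert_token_dropping_original_tf2_checkpoint_to_pytorch.py \
#       --tf_checkpoint_path ./tf2-checkpoint --bert_config_file ./config.json \
#       --pytorch_dump_path ./bert-pytorch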
import os
import unittest
from transformers.models.cpmant.tokenization_cpmant import VOCAB_FILES_NAMES, CpmAntTokenizer
from transformers.testing_utils import require_jieba, tooslow
from ...test_tokenization_common import TokenizerTesterMixin
@require_jieba
class CpmAntTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = CpmAntTokenizer
    test_rust_tokenizer = False
    def setUp(self):
        super().setUp()

        vocab_tokens = [
'''<d>''',
'''</d>''',
'''<s>''',
'''</s>''',
'''</_>''',
'''<unk>''',
'''<pad>''',
'''</n>''',
'''我''',
'''是''',
'''C''',
'''P''',
'''M''',
'''A''',
'''n''',
'''t''',
]
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))
    @tooslow
    def test_pre_tokenization(self):
        tokenizer = CpmAntTokenizer.from_pretrained("openbmb/cpm-ant-10b")
        texts = "今天天气真好!"
        jieba_tokens = ["今天", "天气", "真", "好", "!"]
        tokens = tokenizer.tokenize(texts)
        self.assertListEqual(tokens, jieba_tokens)

        normalized_text = "今天天气真好!"
        input_tokens = [tokenizer.bos_token] + tokens
        input_jieba_tokens = [6, 9802, 14962, 2082, 831, 244]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_jieba_tokens)

        reconstructed_text = tokenizer.decode(input_jieba_tokens)
        self.assertEqual(reconstructed_text, normalized_text)
"""simple docstring"""
from __future__ import annotations
from collections.abc import Callable
def trapezoidal_area(
    fnc: Callable[[int | float], int | float],
    x_start: int | float,
    x_end: int | float,
    steps: int = 100,
) -> float:
    x1 = x_start
    fx1 = fnc(x_start)
    area = 0.0

    for _ in range(steps):
        # Approximates small segments of curve as linear and solve
        # for trapezoidal area
        x2 = (x_end - x_start) / steps + x1
        fx2 = fnc(x2)
        area += abs(fx2 + fx1) * (x2 - x1) / 2

        # Increment step
        x1 = x2
        fx1 = fx2
    return area
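# Added note: each iteration adds one trapezoid's area,
#   |f(x1) + f(x2)| * (x2 - x1) / 2,
# so the loop accumulates the composite trapezoidal rule; abs() keeps every
# contribution positive ("area between the curve and the x axis").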
if __name__ == "__main__":
def SCREAMING_SNAKE_CASE__ ( snake_case : Tuple )-> Optional[Any]:
'''simple docstring'''
return x**3 + x**2
print("""f(x) = x^3 + x^2""")
print("""The area between the curve, x = -5, x = 5 and the x axis is:""")
_lowerCAmelCase : List[str] = 10
while i <= 100_000:
print(F"""with {i} steps: {trapezoidal_area(f, -5, 5, i)}""")
i *= 10
"""simple docstring"""
import unittest
import numpy as np
import requests
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
from transformers.pytorch_utils import is_torch_greater_or_equal_than_1_11
else:
    is_torch_greater_or_equal_than_1_11 = False
if is_vision_available():
from PIL import Image
    from transformers import Pix2StructImageProcessor
class Pix2StructImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        size=None,
        do_normalize=True,
        do_convert_rgb=True,
        patch_size=None,
    ):
        size = size if size is not None else {"height": 20, "width": 20}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.size = size
        self.do_normalize = do_normalize
        self.do_convert_rgb = do_convert_rgb
        self.max_patches = [512, 1024, 2048, 4096]
        self.patch_size = patch_size if patch_size is not None else {"height": 16, "width": 16}

    def prepare_image_processor_dict(self):
        return {"do_normalize": self.do_normalize, "do_convert_rgb": self.do_convert_rgb}

    def prepare_dummy_image(self):
        img_url = "https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers/tasks/australia.jpg"
        raw_image = Image.open(requests.get(img_url, stream=True).raw).convert("RGB")
        return raw_image
@unittest.skipIf(
    not is_torch_greater_or_equal_than_1_11,
    reason="`Pix2StructImageProcessor` requires `torch>=1.11.0`.",
)
@require_torch
@require_vision
class Pix2StructImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = Pix2StructImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = Pix2StructImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()
    def test_image_processor_properties(self):
        image_processor = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processor, "do_normalize"))
        self.assertTrue(hasattr(image_processor, "do_convert_rgb"))

    def test_expected_patches(self):
        dummy_image = self.image_processor_tester.prepare_dummy_image()

        image_processor = self.image_processing_class(**self.image_processor_dict)
        max_patch = 2048

        inputs = image_processor(dummy_image, return_tensors="pt", max_patches=max_patch)
        self.assertTrue(torch.allclose(inputs.flattened_patches.mean(), torch.tensor(0.0606), atol=1e-3, rtol=1e-3))
    def test_call_pil(self):
        # Initialize image_processor
        image_processor = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        expected_hidden_dim = (
            (self.image_processor_tester.patch_size["height"] * self.image_processor_tester.patch_size["width"])
            * self.image_processor_tester.num_channels
        ) + 2
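        # (added note) the "+ 2" accounts for the two location features, a row
        # index and a column index, that Pix2Struct prepends to each flattened patch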
        for max_patch in self.image_processor_tester.max_patches:
            # Test not batched input
            encoded_images = image_processor(
                image_inputs[0], return_tensors="pt", max_patches=max_patch
            ).flattened_patches
            self.assertEqual(
                encoded_images.shape,
                (1, max_patch, expected_hidden_dim),
            )

            # Test batched
            encoded_images = image_processor(
                image_inputs, return_tensors="pt", max_patches=max_patch
            ).flattened_patches
            self.assertEqual(
                encoded_images.shape,
                (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim),
            )
    def test_call_vqa(self):
        # Initialize image_processor
        image_processor = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        expected_hidden_dim = (
            (self.image_processor_tester.patch_size["height"] * self.image_processor_tester.patch_size["width"])
            * self.image_processor_tester.num_channels
        ) + 2

        image_processor.is_vqa = True
        for max_patch in self.image_processor_tester.max_patches:
            # Test not batched input
            with self.assertRaises(ValueError):
                encoded_images = image_processor(
                    image_inputs[0], return_tensors="pt", max_patches=max_patch
                ).flattened_patches

            dummy_text = "Hello"

            encoded_images = image_processor(
                image_inputs[0], return_tensors="pt", max_patches=max_patch, header_text=dummy_text
            ).flattened_patches
            self.assertEqual(
                encoded_images.shape,
                (1, max_patch, expected_hidden_dim),
            )

            # Test batched
            encoded_images = image_processor(
                image_inputs, return_tensors="pt", max_patches=max_patch, header_text=dummy_text
            ).flattened_patches
            self.assertEqual(
                encoded_images.shape,
                (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim),
            )
    def test_call_numpy(self):
        # Initialize image_processor
        image_processor = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        expected_hidden_dim = (
            (self.image_processor_tester.patch_size["height"] * self.image_processor_tester.patch_size["width"])
            * self.image_processor_tester.num_channels
        ) + 2

        for max_patch in self.image_processor_tester.max_patches:
            # Test not batched input
            encoded_images = image_processor(
                image_inputs[0], return_tensors="pt", max_patches=max_patch
            ).flattened_patches
            self.assertEqual(
                encoded_images.shape,
                (1, max_patch, expected_hidden_dim),
            )

            # Test batched
            encoded_images = image_processor(
                image_inputs, return_tensors="pt", max_patches=max_patch
            ).flattened_patches
            self.assertEqual(
                encoded_images.shape,
                (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim),
            )
    def test_call_pytorch(self):
        # Initialize image_processor
        image_processor = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        expected_hidden_dim = (
            (self.image_processor_tester.patch_size["height"] * self.image_processor_tester.patch_size["width"])
            * self.image_processor_tester.num_channels
        ) + 2

        for max_patch in self.image_processor_tester.max_patches:
            # Test not batched input
            encoded_images = image_processor(
                image_inputs[0], return_tensors="pt", max_patches=max_patch
            ).flattened_patches
            self.assertEqual(
                encoded_images.shape,
                (1, max_patch, expected_hidden_dim),
            )

            # Test batched
            encoded_images = image_processor(
                image_inputs, return_tensors="pt", max_patches=max_patch
            ).flattened_patches
            self.assertEqual(
                encoded_images.shape,
                (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim),
            )
@unittest.skipIf(
    not is_torch_greater_or_equal_than_1_11,
    reason="`Pix2StructImageProcessor` requires `torch>=1.11.0`.",
)
@require_torch
@require_vision
class Pix2StructImageProcessingTestFourChannels(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = Pix2StructImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = Pix2StructImageProcessingTester(self, num_channels=4)
        self.expected_encoded_image_num_channels = 3

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processor = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processor, "do_normalize"))
        self.assertTrue(hasattr(image_processor, "do_convert_rgb"))
    def test_call_pil_four_channels(self):
        # Initialize image_processor
        image_processor = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        expected_hidden_dim = (
            (self.image_processor_tester.patch_size["height"] * self.image_processor_tester.patch_size["width"])
            * (self.image_processor_tester.num_channels - 1)
        ) + 2

        for max_patch in self.image_processor_tester.max_patches:
            # Test not batched input
            encoded_images = image_processor(
                image_inputs[0], return_tensors="pt", max_patches=max_patch
            ).flattened_patches
            self.assertEqual(
                encoded_images.shape,
                (1, max_patch, expected_hidden_dim),
            )

            # Test batched
            encoded_images = image_processor(
                image_inputs, return_tensors="pt", max_patches=max_patch
            ).flattened_patches
            self.assertEqual(
                encoded_images.shape,
                (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim),
            )
import argparse
import numpy as np
import torch
from transformers import SpeechT5HifiGan, SpeechT5HifiGanConfig, logging
logging.set_verbosity_info()
logger = logging.get_logger("transformers.models.speecht5")
def load_weights(checkpoint, hf_model, config):
    hf_model.apply_weight_norm()

    hf_model.conv_pre.weight_g.data = checkpoint["input_conv.weight_g"]
    hf_model.conv_pre.weight_v.data = checkpoint["input_conv.weight_v"]
    hf_model.conv_pre.bias.data = checkpoint["input_conv.bias"]

    for i in range(len(config.upsample_rates)):
        hf_model.upsampler[i].weight_g.data = checkpoint[f"upsamples.{i}.1.weight_g"]
        hf_model.upsampler[i].weight_v.data = checkpoint[f"upsamples.{i}.1.weight_v"]
        hf_model.upsampler[i].bias.data = checkpoint[f"upsamples.{i}.1.bias"]

    for i in range(len(config.upsample_rates) * len(config.resblock_kernel_sizes)):
        for j in range(len(config.resblock_dilation_sizes)):
            hf_model.resblocks[i].convs1[j].weight_g.data = checkpoint[f"blocks.{i}.convs1.{j}.1.weight_g"]
            hf_model.resblocks[i].convs1[j].weight_v.data = checkpoint[f"blocks.{i}.convs1.{j}.1.weight_v"]
            hf_model.resblocks[i].convs1[j].bias.data = checkpoint[f"blocks.{i}.convs1.{j}.1.bias"]

            hf_model.resblocks[i].convs2[j].weight_g.data = checkpoint[f"blocks.{i}.convs2.{j}.1.weight_g"]
            hf_model.resblocks[i].convs2[j].weight_v.data = checkpoint[f"blocks.{i}.convs2.{j}.1.weight_v"]
            hf_model.resblocks[i].convs2[j].bias.data = checkpoint[f"blocks.{i}.convs2.{j}.1.bias"]

    hf_model.conv_post.weight_g.data = checkpoint["output_conv.1.weight_g"]
    hf_model.conv_post.weight_v.data = checkpoint["output_conv.1.weight_v"]
    hf_model.conv_post.bias.data = checkpoint["output_conv.1.bias"]

    hf_model.remove_weight_norm()
@torch.no_grad()
def convert_hifigan_checkpoint(
    checkpoint_path,
    stats_path,
    pytorch_dump_folder_path,
    config_path=None,
    repo_id=None,
):
    if config_path is not None:
        config = SpeechT5HifiGanConfig.from_pretrained(config_path)
    else:
        config = SpeechT5HifiGanConfig()

    model = SpeechT5HifiGan(config)

    orig_checkpoint = torch.load(checkpoint_path)
    load_weights(orig_checkpoint["model"]["generator"], model, config)

    stats = np.load(stats_path)
    mean = stats[0].reshape(-1)
    scale = stats[1].reshape(-1)
    model.mean = torch.from_numpy(mean).float()
    model.scale = torch.from_numpy(scale).float()

    model.save_pretrained(pytorch_dump_folder_path)

    if repo_id:
        print("Pushing to the hub...")
        model.push_to_hub(repo_id)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument("--checkpoint_path", required=True, default=None, type=str, help="Path to original checkpoint")
parser.add_argument("--stats_path", required=True, default=None, type=str, help="Path to stats.npy file")
parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
parser.add_argument(
"--pytorch_dump_folder_path", required=True, default=None, type=str, help="Path to the output PyTorch model."
)
parser.add_argument(
"--push_to_hub", default=None, type=str, help="Where to upload the converted model on the 🤗 hub."
)
    args = parser.parse_args()
convert_hifigan_checkpoint(
args.checkpoint_path,
args.stats_path,
args.pytorch_dump_folder_path,
args.config_path,
args.push_to_hub,
)
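# Background sketch (standalone; not part of the conversion script): weight
# normalisation is why the original checkpoint stores weight_g / weight_v
# pairs. torch re-parameterises a weight as g * v / ||v||, and the
# apply_weight_norm() / remove_weight_norm() calls above toggle that split.
import torch
from torch.nn.utils import remove_weight_norm, weight_norm

_conv = weight_norm(torch.nn.Conv1d(1, 1, 3))
assert hasattr(_conv, "weight_g") and hasattr(_conv, "weight_v")
remove_weight_norm(_conv)
assert hasattr(_conv, "weight")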
| 218 |
# A Bipartite Graph is a graph whose vertices can be divided into two independent sets,
# U and V, such that every edge (u, v) either connects a vertex from U to V or a vertex
# from V to U. In other words, for every edge (u, v), either u belongs to U and v to V,
# or u belongs to V and v to U. Equivalently, no edge connects two vertices of the
# same set.
def check_bipartite_dfs(graph):
    visited = [False] * len(graph)
    color = [-1] * len(graph)

    def dfs(v, c):
        visited[v] = True
        color[v] = c
        for u in graph[v]:
            if not visited[u]:
                dfs(u, 1 - c)

    for i in range(len(graph)):
        if not visited[i]:
            dfs(i, 0)

    for i in range(len(graph)):
        for j in graph[i]:
            if color[i] == color[j]:
                return False
    return True


# Adjacency list of graph
graph = {0: [1, 3], 1: [0, 2], 2: [1, 3], 3: [0, 2], 4: []}
print(check_bipartite_dfs(graph))
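# Counterexample (added for illustration): a triangle is an odd cycle, so it
# cannot be two-coloured and the check returns False.
triangle = {0: [1, 2], 1: [0, 2], 2: [0, 1]}
print(check_bipartite_dfs(triangle))  # False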
| 218 | 1 |
import argparse
import json
from collections import OrderedDict
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import PoolFormerConfig, PoolFormerForImageClassification, PoolFormerImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
lowerCamelCase__ : List[Any] = logging.get_logger(__name__)
def replace_key_with_offset(key: str, offset: int, original_name: str, new_name: str) -> str:
    to_find = original_name.split(".")[0]
    key_list = key.split(".")
    orig_block_num = int(key_list[key_list.index(to_find) - 2])
    layer_num = int(key_list[key_list.index(to_find) - 1])
    new_block_num = orig_block_num - offset
    key = key.replace(f"{orig_block_num}.{layer_num}.{original_name}", f"block.{new_block_num}.{layer_num}.{new_name}")
    return key
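# Worked example (illustrative only, not part of the original script): with an
# offset of 0, the "0.1.mlp.fc1" span of an already-renamed key is rewritten
# in place to the block layout used by the HuggingFace model.
assert (
    replace_key_with_offset("poolformer.encoder.0.1.mlp.fc1.weight", 0, "mlp.fc1", "output.conv1")
    == "poolformer.encoder.block.0.1.output.conv1.weight"
)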
def rename_keys(state_dict):
    new_state_dict = OrderedDict()
    total_embed_found, patch_emb_offset = 0, 0
    for key, value in state_dict.items():
        if key.startswith("network"):
            key = key.replace("network", "poolformer.encoder")
        if "proj" in key:
            # Works for the first embedding as well as the internal embedding layers
            if key.endswith("bias") and "patch_embed" not in key:
                patch_emb_offset += 1
            to_replace = key[: key.find("proj")]
            key = key.replace(to_replace, f"patch_embeddings.{total_embed_found}.")
            key = key.replace("proj", "projection")
            if key.endswith("bias"):
                total_embed_found += 1
        if "patch_embeddings" in key:
            key = "poolformer.encoder." + key
        if "mlp.fc1" in key:
            key = replace_key_with_offset(key, patch_emb_offset, "mlp.fc1", "output.conv1")
        if "mlp.fc2" in key:
            key = replace_key_with_offset(key, patch_emb_offset, "mlp.fc2", "output.conv2")
        if "norm1" in key:
            key = replace_key_with_offset(key, patch_emb_offset, "norm1", "before_norm")
        if "norm2" in key:
            key = replace_key_with_offset(key, patch_emb_offset, "norm2", "after_norm")
        if "layer_scale_1" in key:
            key = replace_key_with_offset(key, patch_emb_offset, "layer_scale_1", "layer_scale_1")
        if "layer_scale_2" in key:
            key = replace_key_with_offset(key, patch_emb_offset, "layer_scale_2", "layer_scale_2")
        if "head" in key:
            key = key.replace("head", "classifier")
        new_state_dict[key] = value
    return new_state_dict
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    image = Image.open(requests.get(url, stream=True).raw)
    return image
@torch.no_grad()
def convert_poolformer_checkpoint(model_name, checkpoint_path, pytorch_dump_folder_path):
    config = PoolFormerConfig()

    # set attributes based on model_name
    repo_id = "huggingface/label-files"
    size = model_name[-3:]
    config.num_labels = 1000
    filename = "imagenet-1k-id2label.json"
    expected_shape = (1, 1000)

    # set config attributes
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}
    if size == "s12":
        config.depths = [2, 2, 6, 2]
        config.hidden_sizes = [64, 128, 320, 512]
        config.mlp_ratio = 4.0
        crop_pct = 0.9
    elif size == "s24":
        config.depths = [4, 4, 12, 4]
        config.hidden_sizes = [64, 128, 320, 512]
        config.mlp_ratio = 4.0
        crop_pct = 0.9
    elif size == "s36":
        config.depths = [6, 6, 18, 6]
        config.hidden_sizes = [64, 128, 320, 512]
        config.mlp_ratio = 4.0
        config.layer_scale_init_value = 1e-6
        crop_pct = 0.9
    elif size == "m36":
        config.depths = [6, 6, 18, 6]
        config.hidden_sizes = [96, 192, 384, 768]
        config.mlp_ratio = 4.0
        config.layer_scale_init_value = 1e-6
        crop_pct = 0.95
    elif size == "m48":
        config.depths = [8, 8, 24, 8]
        config.hidden_sizes = [96, 192, 384, 768]
        config.mlp_ratio = 4.0
        config.layer_scale_init_value = 1e-6
        crop_pct = 0.95
    else:
        raise ValueError(f"Size {size} not supported")

    # load image processor
    image_processor = PoolFormerImageProcessor(crop_pct=crop_pct)

    # Prepare image
    image = prepare_img()
    pixel_values = image_processor(images=image, return_tensors="pt").pixel_values

    logger.info(f"Converting model {model_name}...")

    # load original state dict
    state_dict = torch.load(checkpoint_path, map_location=torch.device("cpu"))

    # rename keys
    state_dict = rename_keys(state_dict)

    # create HuggingFace model and load state dict
    model = PoolFormerForImageClassification(config)
    model.load_state_dict(state_dict)
    model.eval()

    # Define image processor
    image_processor = PoolFormerImageProcessor(crop_pct=crop_pct)
    pixel_values = image_processor(images=prepare_img(), return_tensors="pt").pixel_values

    # forward pass
    outputs = model(pixel_values)
    logits = outputs.logits

    # define expected logit slices for different models
    if size == "s12":
        expected_slice = torch.tensor([-0.3045, -0.6758, -0.4869])
    elif size == "s24":
        expected_slice = torch.tensor([0.4402, -0.1374, -0.8045])
    elif size == "s36":
        expected_slice = torch.tensor([-0.6080, -0.5133, -0.5898])
    elif size == "m36":
        expected_slice = torch.tensor([0.3952, 0.2263, -1.2668])
    elif size == "m48":
        expected_slice = torch.tensor([0.1167, -0.0656, -0.3423])
    else:
        raise ValueError(f"Size {size} not supported")

    # verify logits
    assert logits.shape == expected_shape
    assert torch.allclose(logits[0, :3], expected_slice, atol=1e-2)

    # finally, save model and image processor
    logger.info(f"Saving PyTorch model and image processor to {pytorch_dump_folder_path}...")
    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    model.save_pretrained(pytorch_dump_folder_path)
    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
'--model_name',
default='poolformer_s12',
type=str,
help='Name of the model you\'d like to convert.',
)
parser.add_argument(
'--checkpoint_path', default=None, type=str, help='Path to the original PyTorch checkpoint (.pth file).'
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the folder to output PyTorch model.'
)
    args = parser.parse_args()
    convert_poolformer_checkpoint(args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path)
| 363 |
import cv2
import numpy as np


class HarrisCorner:
    def __init__(self, k: float, window_size: int):
        # k is the empirically determined sensitivity constant, usually 0.04 or 0.06
        if k in (0.04, 0.06):
            self.k = k
            self.window_size = window_size
        else:
            raise ValueError("invalid k value")

    def __str__(self):
        return str(self.k)

    def detect(self, img_path: str):
        img = cv2.imread(img_path, 0)
        h, w = img.shape
        corner_list = []
        color_img = img.copy()
        color_img = cv2.cvtColor(color_img, cv2.COLOR_GRAY2RGB)
        dy, dx = np.gradient(img)
        ixx = dx**2
        iyy = dy**2
        ixy = dx * dy
        k = 0.04
        offset = self.window_size // 2
        for y in range(offset, h - offset):
            for x in range(offset, w - offset):
                wxx = ixx[
                    y - offset : y + offset + 1, x - offset : x + offset + 1
                ].sum()
                wyy = iyy[
                    y - offset : y + offset + 1, x - offset : x + offset + 1
                ].sum()
                wxy = ixy[
                    y - offset : y + offset + 1, x - offset : x + offset + 1
                ].sum()
                det = (wxx * wyy) - (wxy**2)
                trace = wxx + wyy
                r = det - k * (trace**2)
                # Can change the value
                if r > 0.5:
                    corner_list.append([x, y, r])
                    color_img.itemset((y, x, 0), 0)
                    color_img.itemset((y, x, 1), 0)
                    color_img.itemset((y, x, 2), 255)
        return color_img, corner_list
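# Quick illustration (not part of the original file) of the Harris response
# R = det(M) - k * trace(M)^2 on hand-picked window sums: strong gradients in
# both directions score high (corner-like), while a single dominant gradient
# direction scores negative (edge-like).
def _demo_harris_response(wxx: float, wyy: float, wxy: float, k: float = 0.04) -> float:
    det = wxx * wyy - wxy**2
    trace = wxx + wyy
    return det - k * trace**2

assert abs(_demo_harris_response(10.0, 10.0, 0.0) - 84.0) < 1e-9  # corner-like window
assert abs(_demo_harris_response(10.0, 0.0, 0.0) - (-4.0)) < 1e-9  # edge-like window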
if __name__ == "__main__":
    edge_detect = HarrisCorner(0.04, 3)
    color_img, _ = edge_detect.detect("path_to_image")
    cv2.imwrite("detect.png", color_img)
| 210 | 0 |
'''simple docstring'''
import colorsys
from PIL import Image # type: ignore
def get_distance(x: float, y: float, max_step: int) -> float:
    a = x
    b = y
    for step in range(max_step):  # noqa: B007
        a_new = a * a - b * b + x
        b = 2 * a * b + y
        a = a_new

        # divergence happens for all complex numbers with an absolute value
        # greater than 4
        if a * a + b * b > 4:
            break
    return step / (max_step - 1)


def get_black_and_white_rgb(distance: float) -> tuple:
    if distance == 1:
        return (0, 0, 0)
    else:
        return (255, 255, 255)


def get_color_coded_rgb(distance: float) -> tuple:
    if distance == 1:
        return (0, 0, 0)
    else:
        return tuple(round(i * 255) for i in colorsys.hsv_to_rgb(distance, 1, 1))


def get_image(
    image_width: int = 800,
    image_height: int = 600,
    figure_center_x: float = -0.6,
    figure_center_y: float = 0,
    figure_width: float = 3.2,
    max_step: int = 50,
    use_distance_color_coding: bool = True,
) -> Image.Image:
    img = Image.new("RGB", (image_width, image_height))
    pixels = img.load()

    # loop through the image-coordinates
    for image_x in range(image_width):
        for image_y in range(image_height):
            # determine the figure-coordinates based on the image-coordinates
            figure_height = figure_width / image_width * image_height
            figure_x = figure_center_x + (image_x / image_width - 0.5) * figure_width
            figure_y = figure_center_y + (image_y / image_height - 0.5) * figure_height

            distance = get_distance(figure_x, figure_y, max_step)

            # color the corresponding pixel based on the selected coloring-function
            if use_distance_color_coding:
                pixels[image_x, image_y] = get_color_coded_rgb(distance)
            else:
                pixels[image_x, image_y] = get_black_and_white_rgb(distance)
    return img
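# Quick check of get_distance (illustrative, not in the original file): the
# origin never escapes, so the full step budget is used and 1.0 is returned;
# a point far outside the set escapes on the first step and returns 0.0.
assert get_distance(0, 0, 50) == 1.0
assert get_distance(5, 5, 50) == 0.0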
if __name__ == "__main__":
import doctest
doctest.testmod()
# colored version, full figure
    img = get_image()
# uncomment for colored version, different section, zoomed in
# img = get_image(figure_center_x = -0.6, figure_center_y = -0.4,
# figure_width = 0.8)
# uncomment for black and white version, full figure
# img = get_image(use_distance_color_coding = False)
# uncomment to save the image
# img.save("mandelbrot.png")
img.show()
| 63 |
import json
import os
import subprocess
import unittest
from ast import literal_eval
import pytest
from parameterized import parameterized, parameterized_class
from . import is_sagemaker_available
if is_sagemaker_available():
from sagemaker import Session, TrainingJobAnalytics
from sagemaker.huggingface import HuggingFace
@pytest.mark.skipif(
literal_eval(os.getenv("""TEST_SAGEMAKER""" , """False""" ) ) is not True , reason="""Skipping test because should only be run when releasing minor transformers version""" , )
@pytest.mark.usefixtures("""sm_env""" )
@parameterized_class(
[
{
"""framework""": """pytorch""",
"""script""": """run_glue_model_parallelism.py""",
"""model_name_or_path""": """roberta-large""",
"""instance_type""": """ml.p3dn.24xlarge""",
"""results""": {"""train_runtime""": 1_600, """eval_accuracy""": 0.3, """eval_loss""": 1.2},
},
{
"""framework""": """pytorch""",
"""script""": """run_glue.py""",
"""model_name_or_path""": """roberta-large""",
"""instance_type""": """ml.p3dn.24xlarge""",
"""results""": {"""train_runtime""": 1_600, """eval_accuracy""": 0.3, """eval_loss""": 1.2},
},
] )
class SCREAMING_SNAKE_CASE_ ( unittest.TestCase ):
    def setUp(self):
        if self.framework == "pytorch":
            subprocess.run(
                f"cp ./examples/pytorch/text-classification/run_glue.py {self.env.test_path}/run_glue.py".split(),
                encoding="utf-8",
                check=True,
            )
        assert hasattr(self, "env")

    def create_estimator(self, instance_count):
        # configuration for running training on smdistributed model parallel
        mpi_options = {
            "enabled": True,
            "processes_per_host": 8,
        }
        smp_options = {
            "enabled": True,
            "parameters": {
                "microbatches": 4,
                "placement_strategy": "spread",
                "pipeline": "interleaved",
                "optimize": "speed",
                "partitions": 4,
                "ddp": True,
            },
        }
        distribution = {"smdistributed": {"modelparallel": smp_options}, "mpi": mpi_options}
        name_extension = "trainer" if self.script == "run_glue.py" else "smtrainer"
        # creates estimator
        return HuggingFace(
            entry_point=self.script,
            source_dir=self.env.test_path,
            role=self.env.role,
            image_uri=self.env.image_uri,
            base_job_name=f"{self.env.base_job_name}-{instance_count}-smp-{name_extension}",
            instance_count=instance_count,
            instance_type=self.instance_type,
            debugger_hook_config=False,
            hyperparameters={
                **self.env.hyperparameters,
                "model_name_or_path": self.model_name_or_path,
                "max_steps": 500,
            },
            metric_definitions=self.env.metric_definitions,
            distribution=distribution,
            py_version="py36",
        )

    def save_results_as_csv(self, job_name):
        TrainingJobAnalytics(job_name).export_csv(f"{self.env.test_path}/{job_name}_metrics.csv")

    @parameterized.expand([(1,)])
    def test_script(self, instance_count):
        # create estimator
        estimator = self.create_estimator(instance_count)
        # run training
        estimator.fit()
        # result dataframe
        result_metrics_df = TrainingJobAnalytics(estimator.latest_training_job.name).dataframe()
        # extract kpis
        eval_accuracy = list(result_metrics_df[result_metrics_df.metric_name == "eval_accuracy"]["value"])
        eval_loss = list(result_metrics_df[result_metrics_df.metric_name == "eval_loss"]["value"])
        # get train time from SageMaker job, this includes starting, preprocessing, stopping
        train_runtime = (
            Session().describe_training_job(estimator.latest_training_job.name).get("TrainingTimeInSeconds", 999999)
        )
        # assert kpis
        assert train_runtime <= self.results["train_runtime"]
        assert all(t >= self.results["eval_accuracy"] for t in eval_accuracy)
        assert all(t <= self.results["eval_loss"] for t in eval_loss)
        # dump tests result into json file to share in PR
        with open(f"{estimator.latest_training_job.name}.json", "w") as outfile:
            json.dump({"train_time": train_runtime, "eval_accuracy": eval_accuracy, "eval_loss": eval_loss}, outfile)
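# Back-of-envelope check (illustrative, not part of the test suite): with
# 8 MPI processes per host and 4 model-parallel partitions, smdistributed
# runs 8 / 4 = 2 data-parallel replicas per instance when ddp=True.
_PROCESSES_PER_HOST, _PARTITIONS = 8, 4
assert _PROCESSES_PER_HOST // _PARTITIONS == 2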
| 343 | 0 |
import unittest
import numpy as np
from transformers.testing_utils import is_flaky, require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DonutImageProcessor
class DonutImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_thumbnail=True,
        do_align_axis=False,
        do_pad=True,
        do_normalize=True,
        image_mean=[0.5, 0.5, 0.5],
        image_std=[0.5, 0.5, 0.5],
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size if size is not None else {"height": 18, "width": 20}
        self.do_thumbnail = do_thumbnail
        self.do_align_axis = do_align_axis
        self.do_pad = do_pad
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
    def prepare_image_processor_dict(self):
return {
"do_resize": self.do_resize,
"size": self.size,
"do_thumbnail": self.do_thumbnail,
"do_align_long_axis": self.do_align_axis,
"do_pad": self.do_pad,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
}
@require_torch
@require_vision
class DonutImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = DonutImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = DonutImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()
    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))
        self.assertTrue(hasattr(image_processing, "do_thumbnail"))
        self.assertTrue(hasattr(image_processing, "do_align_long_axis"))
        self.assertTrue(hasattr(image_processing, "do_pad"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))
    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"height": 18, "width": 20})

        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42)
        self.assertEqual(image_processor.size, {"height": 42, "width": 42})

        # Previous config had dimensions in (width, height) order
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=(42, 84))
        self.assertEqual(image_processor.size, {"height": 84, "width": 42})
def _snake_case ( self: Dict ):
pass
    @is_flaky()
    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )
    @is_flaky()
    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )
    @is_flaky()
    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )
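# Illustration of the size-normalisation behaviour asserted above (a
# hypothetical re-implementation for demonstration, not the processor's code):
# integers become square sizes, and legacy (width, height) tuples are flipped
# into {"height": ..., "width": ...} dicts.
def _normalize_size(size):
    if isinstance(size, int):
        return {"height": size, "width": size}
    if isinstance(size, (tuple, list)):
        width, height = size
        return {"height": height, "width": width}
    return size

assert _normalize_size(42) == {"height": 42, "width": 42}
assert _normalize_size((42, 84)) == {"height": 84, "width": 42}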
| 194 |
import inspect
import unittest
from transformers import RegNetConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from transformers.utils import cached_property, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor
if is_flax_available():
import jax
import jax.numpy as jnp
from transformers.models.regnet.modeling_flax_regnet import FlaxRegNetForImageClassification, FlaxRegNetModel
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class FlaxRegNetModelTester(unittest.TestCase):
'''simple docstring'''
def __init__( self: int , a: Optional[Any] , a: Optional[Any]=3 , a: List[str]=32 , a: Optional[int]=3 , a: Any=10 , a: List[str]=[10, 20, 30, 40] , a: Any=[1, 1, 2, 1] , a: Optional[int]=True , a: List[str]=True , a: Tuple="relu" , a: List[Any]=3 , a: List[Any]=None , ):
__lowerCamelCase : Union[str, Any] = parent
__lowerCamelCase : Any = batch_size
__lowerCamelCase : List[str] = image_size
__lowerCamelCase : Tuple = num_channels
__lowerCamelCase : int = embeddings_size
__lowerCamelCase : Optional[int] = hidden_sizes
__lowerCamelCase : Optional[Any] = depths
__lowerCamelCase : Optional[int] = is_training
__lowerCamelCase : List[str] = use_labels
__lowerCamelCase : Dict = hidden_act
__lowerCamelCase : Union[str, Any] = num_labels
__lowerCamelCase : Tuple = scope
__lowerCamelCase : Union[str, Any] = len(a )
def _snake_case ( self: int ):
__lowerCamelCase : Optional[Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
__lowerCamelCase : List[str] = self.get_config()
return config, pixel_values
def _snake_case ( self: List[str] ):
return RegNetConfig(
num_channels=self.num_channels , embeddings_size=self.embeddings_size , hidden_sizes=self.hidden_sizes , depths=self.depths , hidden_act=self.hidden_act , num_labels=self.num_labels , image_size=self.image_size , )
def _snake_case ( self: Tuple , a: Optional[int] , a: int ):
__lowerCamelCase : Optional[Any] = FlaxRegNetModel(config=a )
__lowerCamelCase : List[str] = model(a )
# Output shape (b, c, h, w)
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , )
def _snake_case ( self: Optional[int] , a: List[Any] , a: List[Any] ):
__lowerCamelCase : Tuple = self.num_labels
__lowerCamelCase : Union[str, Any] = FlaxRegNetForImageClassification(config=a )
__lowerCamelCase : List[Any] = model(a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def _snake_case ( self: Optional[Any] ):
__lowerCamelCase : Union[str, Any] = self.prepare_config_and_inputs()
__lowerCamelCase , __lowerCamelCase : str = config_and_inputs
__lowerCamelCase : Tuple = {'pixel_values': pixel_values}
return config, inputs_dict
@require_flax
class A_ ( __UpperCamelCase , unittest.TestCase ):
'''simple docstring'''
__snake_case = (FlaxRegNetModel, FlaxRegNetForImageClassification) if is_flax_available() else ()
__snake_case = False
__snake_case = False
__snake_case = False
    def setUp(self):
        self.model_tester = FlaxRegNetModelTester(self)
        self.config_tester = ConfigTester(self, config_class=RegNetConfig, has_text_modality=False)
def _snake_case ( self: Union[str, Any] ):
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def _snake_case ( self: List[Any] ):
return
def _snake_case ( self: Dict ):
__lowerCamelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*a )
def _snake_case ( self: Optional[int] ):
__lowerCamelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*a )
@unittest.skip(reason='RegNet does not use inputs_embeds' )
def _snake_case ( self: Tuple ):
pass
@unittest.skip(reason='RegNet does not support input and output embeddings' )
def _snake_case ( self: str ):
pass
def _snake_case ( self: List[Any] ):
__lowerCamelCase , __lowerCamelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__lowerCamelCase : Optional[Any] = model_class(a )
__lowerCamelCase : Dict = inspect.signature(model.__call__ )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__lowerCamelCase : Union[str, Any] = [*signature.parameters.keys()]
__lowerCamelCase : Union[str, Any] = ['pixel_values']
self.assertListEqual(arg_names[:1] , a )
def _snake_case ( self: List[str] ):
def check_hidden_states_output(a: List[Any] , a: List[Any] , a: Union[str, Any] ):
__lowerCamelCase : str = model_class(a )
__lowerCamelCase : Optional[Any] = model(**self._prepare_for_class(a , a ) )
__lowerCamelCase : List[str] = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
__lowerCamelCase : List[str] = self.model_tester.num_stages
self.assertEqual(len(a ) , expected_num_stages + 1 )
__lowerCamelCase , __lowerCamelCase : Dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__lowerCamelCase : int = True
check_hidden_states_output(a , a , a )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
__lowerCamelCase : List[Any] = True
check_hidden_states_output(a , a , a )
def _snake_case ( self: Dict ):
__lowerCamelCase , __lowerCamelCase : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
__lowerCamelCase : str = self._prepare_for_class(a , a )
__lowerCamelCase : List[str] = model_class(a )
@jax.jit
def model_jitted(a: Optional[int] , **a: str ):
return model(pixel_values=a , **a )
with self.subTest('JIT Enabled' ):
__lowerCamelCase : List[str] = model_jitted(**a ).to_tuple()
with self.subTest('JIT Disabled' ):
with jax.disable_jit():
__lowerCamelCase : Optional[int] = model_jitted(**a ).to_tuple()
self.assertEqual(len(a ) , len(a ) )
for jitted_output, output in zip(a , a ):
self.assertEqual(jitted_output.shape , output.shape )
def UpperCamelCase__ ( ):
__lowerCamelCase : List[str] = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
return image
@require_flax
class A_ ( unittest.TestCase ):
'''simple docstring'''
@cached_property
def _snake_case ( self: Tuple ):
return AutoImageProcessor.from_pretrained('facebook/regnet-y-040' ) if is_vision_available() else None
@slow
def _snake_case ( self: Optional[Any] ):
__lowerCamelCase : Optional[Any] = FlaxRegNetForImageClassification.from_pretrained('facebook/regnet-y-040' )
__lowerCamelCase : Tuple = self.default_image_processor
__lowerCamelCase : int = prepare_img()
__lowerCamelCase : Tuple = image_processor(images=a , return_tensors='np' )
__lowerCamelCase : List[Any] = model(**a )
# verify the logits
__lowerCamelCase : str = (1, 1000)
self.assertEqual(outputs.logits.shape , a )
__lowerCamelCase : int = jnp.array([-0.4_1_8_0, -1.5_0_5_1, -3.4_8_3_6] )
self.assertTrue(jnp.allclose(outputs.logits[0, :3] , a , atol=1e-4 ) )
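# Standalone sketch (requires jax; not part of the test) of the JIT-enabled /
# JIT-disabled comparison pattern used above: the same function is evaluated
# once compiled and once eagerly, and the results should agree in shape.
import jax
import jax.numpy as jnp

@jax.jit
def _square(x):
    return x * x

_x = jnp.arange(4.0)
with jax.disable_jit():
    _eager = _square(_x)
_jitted = _square(_x)
assert _eager.shape == _jitted.shape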
| 194 | 1 |
from __future__ import annotations
import copy
import tempfile
import unittest
from transformers import CONFIG_MAPPING, AutoConfig, BertConfig, GPTaConfig, TaConfig, TapasConfig, is_tf_available
from transformers.testing_utils import (
DUMMY_UNKNOWN_IDENTIFIER,
SMALL_MODEL_IDENTIFIER,
RequestCounter,
require_tensorflow_probability,
require_tf,
slow,
)
from ..bert.test_modeling_bert import BertModelTester
if is_tf_available():
from transformers import (
TFAutoModel,
TFAutoModelForCausalLM,
TFAutoModelForMaskedLM,
TFAutoModelForPreTraining,
TFAutoModelForQuestionAnswering,
TFAutoModelForSeqaSeqLM,
TFAutoModelForSequenceClassification,
TFAutoModelForTableQuestionAnswering,
TFAutoModelForTokenClassification,
TFAutoModelWithLMHead,
TFBertForMaskedLM,
TFBertForPreTraining,
TFBertForQuestionAnswering,
TFBertForSequenceClassification,
TFBertModel,
TFFunnelBaseModel,
TFFunnelModel,
TFGPTaLMHeadModel,
TFRobertaForMaskedLM,
TFTaForConditionalGeneration,
TFTapasForQuestionAnswering,
)
from transformers.models.auto.modeling_tf_auto import (
TF_MODEL_FOR_CAUSAL_LM_MAPPING,
TF_MODEL_FOR_MASKED_LM_MAPPING,
TF_MODEL_FOR_PRETRAINING_MAPPING,
TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING,
TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING,
TF_MODEL_MAPPING,
)
from transformers.models.bert.modeling_tf_bert import TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST
from transformers.models.gpta.modeling_tf_gpta import TF_GPT2_PRETRAINED_MODEL_ARCHIVE_LIST
from transformers.models.ta.modeling_tf_ta import TF_T5_PRETRAINED_MODEL_ARCHIVE_LIST
from transformers.models.tapas.modeling_tf_tapas import TF_TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST
class NewModelConfig(BertConfig):
    model_type = "new-model"


if is_tf_available():

    class TFNewModel(TFBertModel):
        config_class = NewModelConfig
@require_tf
class __lowerCAmelCase ( unittest.TestCase):
@slow
def _lowercase ( self ) -> List[Any]:
'''simple docstring'''
a__ : str ="bert-base-cased"
a__ : Any =AutoConfig.from_pretrained(lowerCAmelCase__ )
self.assertIsNotNone(lowerCAmelCase__ )
self.assertIsInstance(lowerCAmelCase__ , lowerCAmelCase__ )
a__ : Optional[Any] =TFAutoModel.from_pretrained(lowerCAmelCase__ )
self.assertIsNotNone(lowerCAmelCase__ )
self.assertIsInstance(lowerCAmelCase__ , lowerCAmelCase__ )
@slow
def _lowercase ( self ) -> int:
'''simple docstring'''
a__ : List[Any] ="bert-base-cased"
a__ : Dict =AutoConfig.from_pretrained(lowerCAmelCase__ )
self.assertIsNotNone(lowerCAmelCase__ )
self.assertIsInstance(lowerCAmelCase__ , lowerCAmelCase__ )
a__ : Tuple =TFAutoModelForPreTraining.from_pretrained(lowerCAmelCase__ )
self.assertIsNotNone(lowerCAmelCase__ )
self.assertIsInstance(lowerCAmelCase__ , lowerCAmelCase__ )
@slow
def _lowercase ( self ) -> Dict:
'''simple docstring'''
for model_name in TF_GPT2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
a__ : Any =AutoConfig.from_pretrained(lowerCAmelCase__ )
self.assertIsNotNone(lowerCAmelCase__ )
self.assertIsInstance(lowerCAmelCase__ , lowerCAmelCase__ )
a__ : Optional[int] =TFAutoModelForCausalLM.from_pretrained(lowerCAmelCase__ )
a__ , a__ : Union[str, Any] =TFAutoModelForCausalLM.from_pretrained(lowerCAmelCase__ , output_loading_info=lowerCAmelCase__ )
self.assertIsNotNone(lowerCAmelCase__ )
self.assertIsInstance(lowerCAmelCase__ , lowerCAmelCase__ )
@slow
def _lowercase ( self ) -> Dict:
'''simple docstring'''
for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
a__ : str =AutoConfig.from_pretrained(lowerCAmelCase__ )
self.assertIsNotNone(lowerCAmelCase__ )
self.assertIsInstance(lowerCAmelCase__ , lowerCAmelCase__ )
a__ : List[str] =TFAutoModelWithLMHead.from_pretrained(lowerCAmelCase__ )
self.assertIsNotNone(lowerCAmelCase__ )
self.assertIsInstance(lowerCAmelCase__ , lowerCAmelCase__ )
@slow
def _lowercase ( self ) -> Any:
'''simple docstring'''
for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
a__ : Optional[int] =AutoConfig.from_pretrained(lowerCAmelCase__ )
self.assertIsNotNone(lowerCAmelCase__ )
self.assertIsInstance(lowerCAmelCase__ , lowerCAmelCase__ )
a__ : Optional[int] =TFAutoModelForMaskedLM.from_pretrained(lowerCAmelCase__ )
a__ , a__ : Dict =TFAutoModelForMaskedLM.from_pretrained(lowerCAmelCase__ , output_loading_info=lowerCAmelCase__ )
self.assertIsNotNone(lowerCAmelCase__ )
self.assertIsInstance(lowerCAmelCase__ , lowerCAmelCase__ )
@slow
def _lowercase ( self ) -> Dict:
'''simple docstring'''
for model_name in TF_T5_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
a__ : int =AutoConfig.from_pretrained(lowerCAmelCase__ )
self.assertIsNotNone(lowerCAmelCase__ )
self.assertIsInstance(lowerCAmelCase__ , lowerCAmelCase__ )
a__ : Any =TFAutoModelForSeqaSeqLM.from_pretrained(lowerCAmelCase__ )
a__ , a__ : List[Any] =TFAutoModelForSeqaSeqLM.from_pretrained(lowerCAmelCase__ , output_loading_info=lowerCAmelCase__ )
self.assertIsNotNone(lowerCAmelCase__ )
self.assertIsInstance(lowerCAmelCase__ , lowerCAmelCase__ )
@slow
def _lowercase ( self ) -> Optional[Any]:
'''simple docstring'''
for model_name in ["bert-base-uncased"]:
a__ : Any =AutoConfig.from_pretrained(lowerCAmelCase__ )
self.assertIsNotNone(lowerCAmelCase__ )
self.assertIsInstance(lowerCAmelCase__ , lowerCAmelCase__ )
a__ : List[Any] =TFAutoModelForSequenceClassification.from_pretrained(lowerCAmelCase__ )
self.assertIsNotNone(lowerCAmelCase__ )
self.assertIsInstance(lowerCAmelCase__ , lowerCAmelCase__ )
@slow
def _lowercase ( self ) -> Optional[int]:
'''simple docstring'''
for model_name in ["bert-base-uncased"]:
a__ : Any =AutoConfig.from_pretrained(lowerCAmelCase__ )
self.assertIsNotNone(lowerCAmelCase__ )
self.assertIsInstance(lowerCAmelCase__ , lowerCAmelCase__ )
a__ : Dict =TFAutoModelForQuestionAnswering.from_pretrained(lowerCAmelCase__ )
self.assertIsNotNone(lowerCAmelCase__ )
self.assertIsInstance(lowerCAmelCase__ , lowerCAmelCase__ )
@slow
@require_tensorflow_probability
def _lowercase ( self ) -> Union[str, Any]:
'''simple docstring'''
for model_name in TF_TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST[5:6]:
a__ : Any =AutoConfig.from_pretrained(lowerCAmelCase__ )
self.assertIsNotNone(lowerCAmelCase__ )
self.assertIsInstance(lowerCAmelCase__ , lowerCAmelCase__ )
a__ : Union[str, Any] =TFAutoModelForTableQuestionAnswering.from_pretrained(lowerCAmelCase__ )
a__ , a__ : List[str] =TFAutoModelForTableQuestionAnswering.from_pretrained(
lowerCAmelCase__ , output_loading_info=lowerCAmelCase__ )
self.assertIsNotNone(lowerCAmelCase__ )
self.assertIsInstance(lowerCAmelCase__ , lowerCAmelCase__ )
def _lowercase ( self ) -> Optional[Any]:
'''simple docstring'''
a__ : Optional[int] =TFAutoModelWithLMHead.from_pretrained(lowerCAmelCase__ )
self.assertIsInstance(lowerCAmelCase__ , lowerCAmelCase__ )
self.assertEqual(model.num_parameters() , 1_4_4_1_0 )
self.assertEqual(model.num_parameters(only_trainable=lowerCAmelCase__ ) , 1_4_4_1_0 )
def _lowercase ( self ) -> List[Any]:
'''simple docstring'''
a__ : int =TFAutoModelWithLMHead.from_pretrained(lowerCAmelCase__ )
self.assertIsInstance(lowerCAmelCase__ , lowerCAmelCase__ )
self.assertEqual(model.num_parameters() , 1_4_4_1_0 )
self.assertEqual(model.num_parameters(only_trainable=lowerCAmelCase__ ) , 1_4_4_1_0 )
def _lowercase ( self ) -> Optional[Any]:
'''simple docstring'''
a__ : str =TFAutoModel.from_pretrained("sgugger/funnel-random-tiny" )
self.assertIsInstance(lowerCAmelCase__ , lowerCAmelCase__ )
a__ : Union[str, Any] =copy.deepcopy(model.config )
a__ : Union[str, Any] =["FunnelBaseModel"]
a__ : Dict =TFAutoModel.from_config(lowerCAmelCase__ )
self.assertIsInstance(lowerCAmelCase__ , lowerCAmelCase__ )
with tempfile.TemporaryDirectory() as tmp_dir:
model.save_pretrained(lowerCAmelCase__ )
a__ : List[str] =TFAutoModel.from_pretrained(lowerCAmelCase__ )
self.assertIsInstance(lowerCAmelCase__ , lowerCAmelCase__ )
def _lowercase ( self ) -> int:
'''simple docstring'''
try:
AutoConfig.register("new-model" , lowerCAmelCase__ )
a__ : Tuple =[
TFAutoModel,
TFAutoModelForCausalLM,
TFAutoModelForMaskedLM,
TFAutoModelForPreTraining,
TFAutoModelForQuestionAnswering,
TFAutoModelForSequenceClassification,
TFAutoModelForTokenClassification,
]
for auto_class in auto_classes:
with self.subTest(auto_class.__name__ ):
# Wrong config class will raise an error
with self.assertRaises(lowerCAmelCase__ ):
auto_class.register(lowerCAmelCase__ , lowerCAmelCase__ )
auto_class.register(lowerCAmelCase__ , lowerCAmelCase__ )
# Trying to register something existing in the Transformers library will raise an error
with self.assertRaises(lowerCAmelCase__ ):
auto_class.register(lowerCAmelCase__ , lowerCAmelCase__ )
# Now that the config is registered, it can be used as any other config with the auto-API
a__ : List[str] =BertModelTester(self ).get_config()
a__ : Dict =NewModelConfig(**tiny_config.to_dict() )
a__ : Optional[int] =auto_class.from_config(lowerCAmelCase__ )
self.assertIsInstance(lowerCAmelCase__ , lowerCAmelCase__ )
with tempfile.TemporaryDirectory() as tmp_dir:
model.save_pretrained(lowerCAmelCase__ )
a__ : int =auto_class.from_pretrained(lowerCAmelCase__ )
self.assertIsInstance(lowerCAmelCase__ , lowerCAmelCase__ )
finally:
if "new-model" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["new-model"]
for mapping in (
TF_MODEL_MAPPING,
TF_MODEL_FOR_PRETRAINING_MAPPING,
TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING,
TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING,
TF_MODEL_FOR_CAUSAL_LM_MAPPING,
TF_MODEL_FOR_MASKED_LM_MAPPING,
):
if NewModelConfig in mapping._extra_content:
del mapping._extra_content[NewModelConfig]
def _lowercase ( self ) -> List[Any]:
'''simple docstring'''
with self.assertRaisesRegex(
lowerCAmelCase__ , "bert-base is not a local folder and is not a valid model identifier" ):
a__ : Dict =TFAutoModel.from_pretrained("bert-base" )
def _lowercase ( self ) -> Any:
'''simple docstring'''
with self.assertRaisesRegex(
lowerCAmelCase__ , r"aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)" ):
a__ : int =TFAutoModel.from_pretrained(lowerCAmelCase__ , revision="aaaaaa" )
def _lowercase ( self ) -> Tuple:
'''simple docstring'''
with self.assertRaisesRegex(
lowerCAmelCase__ , "hf-internal-testing/config-no-model does not appear to have a file named pytorch_model.bin" , ):
a__ : Optional[int] =TFAutoModel.from_pretrained("hf-internal-testing/config-no-model" )
def _lowercase ( self ) -> Tuple:
'''simple docstring'''
with self.assertRaisesRegex(lowerCAmelCase__ , "Use `from_pt=True` to load this model" ):
a__ : Optional[Any] =TFAutoModel.from_pretrained("hf-internal-testing/tiny-bert-pt-only" )
def _lowercase ( self ) -> Dict:
'''simple docstring'''
a__ : Any =TFAutoModel.from_pretrained("hf-internal-testing/tiny-random-bert" )
with RequestCounter() as counter:
a__ : List[str] =TFAutoModel.from_pretrained("hf-internal-testing/tiny-random-bert" )
self.assertEqual(counter.get_request_count , 0 )
self.assertEqual(counter.head_request_count , 1 )
self.assertEqual(counter.other_request_count , 0 )
# With a sharded checkpoint
a__ : Any =TFAutoModel.from_pretrained("ArthurZ/tiny-random-bert-sharded" )
with RequestCounter() as counter:
a__ : Dict =TFAutoModel.from_pretrained("ArthurZ/tiny-random-bert-sharded" )
self.assertEqual(counter.get_request_count , 0 )
self.assertEqual(counter.head_request_count , 1 )
self.assertEqual(counter.other_request_count , 0 )
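# Minimal standalone sketch of the registration pattern exercised above
# (assumes transformers is installed; MyConfig is an illustrative name, not
# part of the test file): a custom config registered under its model_type
# becomes resolvable through AutoConfig.
from transformers import AutoConfig, PretrainedConfig

class MyConfig(PretrainedConfig):
    model_type = "my-model"

AutoConfig.register("my-model", MyConfig)
assert isinstance(AutoConfig.for_model("my-model"), MyConfig)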
| 95 |
"""simple docstring"""
from transformers import BertTokenizerFast
from .custom_tokenization import CustomTokenizer
class CustomTokenizerFast(BertTokenizerFast):
    slow_tokenizer_class = CustomTokenizer
    pass
| 286 | 0 |
import argparse
import logging
import pickle
import random
import time
import numpy as np
from transformers import BertTokenizer, GPTaTokenizer, RobertaTokenizer
logging.basicConfig(
format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''', datefmt='''%m/%d/%Y %H:%M:%S''', level=logging.INFO
)
logger = logging.getLogger(__name__)


def main():
    parser = argparse.ArgumentParser(
        description="Preprocess the data to avoid re-doing it several times by (tokenization + token_to_ids)."
    )
    parser.add_argument("--file_path", type=str, default="data/dump.txt", help="The path to the data.")
    parser.add_argument("--tokenizer_type", type=str, default="bert", choices=["bert", "roberta", "gpt2"])
    parser.add_argument("--tokenizer_name", type=str, default="bert-base-uncased", help="The tokenizer to use.")
    parser.add_argument("--dump_file", type=str, default="data/dump", help="The dump file prefix.")
    args = parser.parse_args()

    logger.info(f"Loading Tokenizer ({args.tokenizer_name})")
    if args.tokenizer_type == "bert":
        tokenizer = BertTokenizer.from_pretrained(args.tokenizer_name)
        bos = tokenizer.special_tokens_map["cls_token"]  # `[CLS]`
        sep = tokenizer.special_tokens_map["sep_token"]  # `[SEP]`
    elif args.tokenizer_type == "roberta":
        tokenizer = RobertaTokenizer.from_pretrained(args.tokenizer_name)
        bos = tokenizer.special_tokens_map["cls_token"]  # `<s>`
        sep = tokenizer.special_tokens_map["sep_token"]  # `</s>`
    elif args.tokenizer_type == "gpt2":
        tokenizer = GPTaTokenizer.from_pretrained(args.tokenizer_name)
        bos = tokenizer.special_tokens_map["bos_token"]  # `<|endoftext|>`
        sep = tokenizer.special_tokens_map["eos_token"]  # `<|endoftext|>`

    logger.info(f"Loading text from {args.file_path}")
    with open(args.file_path, "r", encoding="utf8") as fp:
        data = fp.readlines()

    logger.info("Start encoding")
    logger.info(f"{len(data)} examples to process.")

    rslt = []
    iter = 0
    interval = 10000
    start = time.time()
    for text in data:
        text = f"{bos} {text.strip()} {sep}"
        token_ids = tokenizer.encode(text, add_special_tokens=False)
        rslt.append(token_ids)

        iter += 1
        if iter % interval == 0:
            end = time.time()
            logger.info(f"{iter} examples processed. - {(end-start):.2f}s/{interval}expl")
            start = time.time()

    logger.info("Finished binarization")
    logger.info(f"{len(rslt)} examples processed.")

    dp_file = f"{args.dump_file}.{args.tokenizer_name}.pickle"
    vocab_size = tokenizer.vocab_size
    if vocab_size < (1 << 16):
        rslt_ = [np.uint16(d) for d in rslt]
    else:
        rslt_ = [np.int32(d) for d in rslt]
    random.shuffle(rslt_)
    logger.info(f"Dump to {dp_file}")
    with open(dp_file, "wb") as handle:
        pickle.dump(rslt_, handle, protocol=pickle.HIGHEST_PROTOCOL)


if __name__ == "__main__":
    main()
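# Why the uint16/int32 split above matters (illustrative, not part of the
# script): token ids below 2**16 fit in two bytes each, so the pickled dump is
# half the size of an int32 dump for BERT-sized vocabularies (~30k tokens).
import numpy as np

_ids = np.arange(30522)
assert _ids.astype(np.uint16).nbytes * 2 == _ids.astype(np.int32).nbytes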
| 366 |
import multiprocessing
import os
from typing import BinaryIO, Optional, Union
import fsspec
from .. import Dataset, Features, NamedSplit, config
from ..formatting import query_table
from ..packaged_modules.json.json import Json
from ..utils import logging
from ..utils.typing import NestedDataStructureLike, PathLike
from .abc import AbstractDatasetReader
class JsonDatasetReader(AbstractDatasetReader):
    def __init__(
        self,
        path_or_paths: NestedDataStructureLike[PathLike],
        split: Optional[NamedSplit] = None,
        features: Optional[Features] = None,
        cache_dir: str = None,
        keep_in_memory: bool = False,
        streaming: bool = False,
        field: Optional[str] = None,
        num_proc: Optional[int] = None,
        **kwargs,
    ):
        super().__init__(
            path_or_paths,
            split=split,
            features=features,
            cache_dir=cache_dir,
            keep_in_memory=keep_in_memory,
            streaming=streaming,
            num_proc=num_proc,
            **kwargs,
        )
        self.field = field
        path_or_paths = path_or_paths if isinstance(path_or_paths, dict) else {self.split: path_or_paths}
        self.builder = Json(
            cache_dir=cache_dir,
            data_files=path_or_paths,
            features=features,
            field=field,
            **kwargs,
        )

    def read(self):
        # Build iterable dataset
        if self.streaming:
            dataset = self.builder.as_streaming_dataset(split=self.split)
        # Build regular (map-style) dataset
        else:
            download_config = None
            download_mode = None
            verification_mode = None
            base_path = None

            self.builder.download_and_prepare(
                download_config=download_config,
                download_mode=download_mode,
                verification_mode=verification_mode,
                base_path=base_path,
                num_proc=self.num_proc,
            )
            dataset = self.builder.as_dataset(
                split=self.split, verification_mode=verification_mode, in_memory=self.keep_in_memory
            )
        return dataset


class JsonDatasetWriter:
    def __init__(
        self,
        dataset: Dataset,
        path_or_buf: Union[PathLike, BinaryIO],
        batch_size: Optional[int] = None,
        num_proc: Optional[int] = None,
        **to_json_kwargs,
    ):
        if num_proc is not None and num_proc <= 0:
            raise ValueError(f"num_proc {num_proc} must be an integer > 0.")

        self.dataset = dataset
        self.path_or_buf = path_or_buf
        self.batch_size = batch_size if batch_size else config.DEFAULT_MAX_BATCH_SIZE
        self.num_proc = num_proc
        self.encoding = "utf-8"
        self.to_json_kwargs = to_json_kwargs

    def write(self) -> int:
        _ = self.to_json_kwargs.pop("path_or_buf", None)
        orient = self.to_json_kwargs.pop("orient", "records")
        lines = self.to_json_kwargs.pop("lines", True if orient == "records" else False)
        index = self.to_json_kwargs.pop("index", False if orient in ["split", "table"] else True)
        compression = self.to_json_kwargs.pop("compression", None)

        if compression not in [None, "infer", "gzip", "bz2", "xz"]:
            raise NotImplementedError(f"`datasets` currently does not support {compression} compression")

        if isinstance(self.path_or_buf, (str, bytes, os.PathLike)):
            with fsspec.open(self.path_or_buf, "wb", compression=compression) as buffer:
                written = self._write(file_obj=buffer, orient=orient, lines=lines, index=index, **self.to_json_kwargs)
        else:
            if compression:
                raise NotImplementedError(
                    f"The compression parameter is not supported when writing to a buffer, but compression={compression}"
                    " was passed. Please provide a local path instead."
                )
            written = self._write(
                file_obj=self.path_or_buf, orient=orient, lines=lines, index=index, **self.to_json_kwargs
            )
        return written

    def _batch_json(self, args):
        offset, orient, lines, index, to_json_kwargs = args

        batch = query_table(
            table=self.dataset.data,
            key=slice(offset, offset + self.batch_size),
            indices=self.dataset._indices,
        )
        json_str = batch.to_pandas().to_json(
            path_or_buf=None, orient=orient, lines=lines, index=index, **to_json_kwargs
        )
        if not json_str.endswith("\n"):
            json_str += "\n"
        return json_str.encode(self.encoding)

    def _write(self, file_obj: BinaryIO, orient, lines, index, **to_json_kwargs) -> int:
        """Writes the pyarrow table as JSON lines to a binary file handle."""
        written = 0

        if self.num_proc is None or self.num_proc == 1:
            for offset in logging.tqdm(
                range(0, len(self.dataset), self.batch_size),
                unit="ba",
                disable=not logging.is_progress_bar_enabled(),
                desc="Creating json from Arrow format",
            ):
                json_str = self._batch_json((offset, orient, lines, index, to_json_kwargs))
                written += file_obj.write(json_str)
        else:
            num_rows, batch_size = len(self.dataset), self.batch_size
            with multiprocessing.Pool(self.num_proc) as pool:
                for json_str in logging.tqdm(
                    pool.imap(
                        self._batch_json,
                        [(offset, orient, lines, index, to_json_kwargs) for offset in range(0, num_rows, batch_size)],
                    ),
                    total=(num_rows // batch_size) + 1 if num_rows % batch_size else num_rows // batch_size,
                    unit="ba",
                    disable=not logging.is_progress_bar_enabled(),
                    desc="Creating json from Arrow format",
                ):
                    written += file_obj.write(json_str)

        return written
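# Standalone sketch (not from the module above) of why pool.imap is used for
# the multi-process branch of _write: unlike imap_unordered, imap yields
# results in submission order, so encoded batches land in the file in row order.
import multiprocessing

def _demo_square(n):
    return n * n

if __name__ == "__main__":
    with multiprocessing.Pool(2) as _pool:
        assert list(_pool.imap(_demo_square, range(5))) == [0, 1, 4, 9, 16]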
| 131 | 0 |
import numpy as np
import torch
from torch.utils.data import Dataset
from utils import logger
class LmSeqsDataset(Dataset):
    def __init__(self, params, data):
        self.params = params
        self.token_ids = np.array(data)
        self.lengths = np.array([len(t) for t in data])

        self.check()
        self.remove_long_sequences()
        self.remove_empty_sequences()
        self.remove_unknown_sequences()
        self.check()
        self.print_statistics()

    def __getitem__(self, index):
        return (self.token_ids[index], self.lengths[index])

    def __len__(self):
        return len(self.lengths)

    def check(self):
        assert len(self.token_ids) == len(self.lengths)
        assert all(self.lengths[i] == len(self.token_ids[i]) for i in range(len(self.lengths)))

    def remove_long_sequences(self):
        max_len = self.params.max_model_input_size
        indices = self.lengths > max_len
        logger.info(f"Splitting {sum(indices)} too long sequences.")

        def divide_chunks(l, n):
            return [l[i : i + n] for i in range(0, len(l), n)]

        new_tok_ids = []
        new_lengths = []
        if self.params.mlm:
            cls_id, sep_id = self.params.special_tok_ids["cls_token"], self.params.special_tok_ids["sep_token"]
        else:
            cls_id, sep_id = self.params.special_tok_ids["bos_token"], self.params.special_tok_ids["eos_token"]

        for seq_, len_ in zip(self.token_ids, self.lengths):
            assert (seq_[0] == cls_id) and (seq_[-1] == sep_id), seq_
            if len_ <= max_len:
                new_tok_ids.append(seq_)
                new_lengths.append(len_)
            else:
                sub_seqs = []
                for sub_s in divide_chunks(seq_, max_len - 2):
                    if sub_s[0] != cls_id:
                        sub_s = np.insert(sub_s, 0, cls_id)
                    if sub_s[-1] != sep_id:
                        sub_s = np.insert(sub_s, len(sub_s), sep_id)
                    assert len(sub_s) <= max_len
                    assert (sub_s[0] == cls_id) and (sub_s[-1] == sep_id), sub_s
                    sub_seqs.append(sub_s)

                new_tok_ids.extend(sub_seqs)
                new_lengths.extend([len(l) for l in sub_seqs])

        self.token_ids = np.array(new_tok_ids)
        self.lengths = np.array(new_lengths)

    def remove_empty_sequences(self):
        init_size = len(self)
        indices = self.lengths > 11
        self.token_ids = self.token_ids[indices]
        self.lengths = self.lengths[indices]
        new_size = len(self)
        logger.info(f"Remove {init_size - new_size} too short (<=11 tokens) sequences.")

    def remove_unknown_sequences(self):
        if "unk_token" not in self.params.special_tok_ids:
            return
        else:
            unk_token_id = self.params.special_tok_ids["unk_token"]
        init_size = len(self)
        unk_occs = np.array([np.count_nonzero(a == unk_token_id) for a in self.token_ids])
        indices = (unk_occs / self.lengths) < 0.5
        self.token_ids = self.token_ids[indices]
        self.lengths = self.lengths[indices]
        new_size = len(self)
        logger.info(f"Remove {init_size - new_size} sequences with a high level of unknown tokens (50%).")

    def print_statistics(self):
        if not self.params.is_master:
            return
        logger.info(f"{len(self)} sequences")
        # data_len = sum(self.lengths)
        # nb_unique_tokens = len(Counter(list(chain(*self.token_ids))))
        # logger.info(f'{data_len} tokens ({nb_unique_tokens} unique)')
        # unk_idx = self.params.special_tok_ids['unk_token']
        # nb_unknown = sum([(t==unk_idx).sum() for t in self.token_ids])
        # logger.info(f'{nb_unknown} unknown tokens (covering {100*nb_unknown/data_len:.2f}% of the data)')

    def batch_sequences(self, batch):
        token_ids = [t[0] for t in batch]
        lengths = [t[1] for t in batch]
        assert len(token_ids) == len(lengths)

        # Max for paddings
        max_seq_len_ = max(lengths)

        # Pad token ids
        if self.params.mlm:
            pad_idx = self.params.special_tok_ids["pad_token"]
        else:
            pad_idx = self.params.special_tok_ids["unk_token"]
        tk_ = [list(t.astype(int)) + [pad_idx] * (max_seq_len_ - len(t)) for t in token_ids]
        assert len(tk_) == len(lengths)
        assert all(len(t) == max_seq_len_ for t in tk_)

        tk_t = torch.tensor(tk_)  # (bs, max_seq_len_)
        lg_t = torch.tensor(lengths)  # (bs)
        return tk_t, lg_t
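# Illustration of the right-padding done in batch_sequences above (toy values,
# not from the class): shorter sequences are extended with pad_idx up to the
# batch maximum before being stacked into a single tensor.
import torch

_pad_idx = 0
_batch = [[5, 6, 7], [8, 9]]
_max_len = max(len(s) for s in _batch)
_padded = torch.tensor([s + [_pad_idx] * (_max_len - len(s)) for s in _batch])
assert _padded.tolist() == [[5, 6, 7], [8, 9, 0]]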
| 52 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
    "configuration_electra": ["ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP", "ElectraConfig", "ElectraOnnxConfig"],
    "tokenization_electra": ["ElectraTokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_electra_fast"] = ["ElectraTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_electra"] = [
"""ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""ElectraForCausalLM""",
"""ElectraForMaskedLM""",
"""ElectraForMultipleChoice""",
"""ElectraForPreTraining""",
"""ElectraForQuestionAnswering""",
"""ElectraForSequenceClassification""",
"""ElectraForTokenClassification""",
"""ElectraModel""",
"""ElectraPreTrainedModel""",
"""load_tf_weights_in_electra""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_electra"] = [
"""TF_ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFElectraForMaskedLM""",
"""TFElectraForMultipleChoice""",
"""TFElectraForPreTraining""",
"""TFElectraForQuestionAnswering""",
"""TFElectraForSequenceClassification""",
"""TFElectraForTokenClassification""",
"""TFElectraModel""",
"""TFElectraPreTrainedModel""",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_electra"] = [
"""FlaxElectraForCausalLM""",
"""FlaxElectraForMaskedLM""",
"""FlaxElectraForMultipleChoice""",
"""FlaxElectraForPreTraining""",
"""FlaxElectraForQuestionAnswering""",
"""FlaxElectraForSequenceClassification""",
"""FlaxElectraForTokenClassification""",
"""FlaxElectraModel""",
"""FlaxElectraPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_electra import ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP, ElectraConfig, ElectraOnnxConfig
from .tokenization_electra import ElectraTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_electra_fast import ElectraTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_electra import (
ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST,
ElectraForCausalLM,
ElectraForMaskedLM,
ElectraForMultipleChoice,
ElectraForPreTraining,
ElectraForQuestionAnswering,
ElectraForSequenceClassification,
ElectraForTokenClassification,
ElectraModel,
ElectraPreTrainedModel,
load_tf_weights_in_electra,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_electra import (
TF_ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST,
TFElectraForMaskedLM,
TFElectraForMultipleChoice,
TFElectraForPreTraining,
TFElectraForQuestionAnswering,
TFElectraForSequenceClassification,
TFElectraForTokenClassification,
TFElectraModel,
TFElectraPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_electra import (
FlaxElectraForCausalLM,
FlaxElectraForMaskedLM,
FlaxElectraForMultipleChoice,
FlaxElectraForPreTraining,
FlaxElectraForQuestionAnswering,
FlaxElectraForSequenceClassification,
FlaxElectraForTokenClassification,
FlaxElectraModel,
FlaxElectraPreTrainedModel,
)
else:
import sys
sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
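The `sys.modules[__name__]` swap above replaces the package with a proxy that imports a submodule only when one of its attributes is first accessed. A minimal sketch of the idea, not the actual transformers implementation:

import importlib
import types

class LazyModule(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        self._import_structure = import_structure

    def __getattr__(self, attr):
        # Find which submodule exports `attr`, import it lazily, and forward.
        for submodule, symbols in self._import_structure.items():
            if attr in symbols:
                module = importlib.import_module(f"{self.__name__}.{submodule}")
                return getattr(module, attr)
        raise AttributeError(f"module {self.__name__!r} has no attribute {attr!r}")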
| 52 | 1 |
def cramers_rule_2x2(equation1: list, equation2: list) -> tuple[float, float]:
    # Each equation is [a, b, c], representing a*x + b*y = c.
    # Check if the input is valid
    if not (len(equation1) == len(equation2) == 3):
        raise ValueError("Please enter a valid equation.")
    if equation1[0] == equation1[1] == equation2[0] == equation2[1] == 0:
        raise ValueError("Both a & b of two equations can't be zero.")

    # Extract the coefficients
    a1, b1, c1 = equation1
    a2, b2, c2 = equation2

    # Calculate the determinants of the matrices
    determinant = a1 * b2 - a2 * b1
    determinant_x = c1 * b2 - c2 * b1
    determinant_y = a1 * c2 - a2 * c1

    # Check if the system of linear equations has a solution (using Cramer's rule)
    if determinant == 0:
        if determinant_x == determinant_y == 0:
            raise ValueError("Infinite solutions. (Consistent system)")
        else:
            raise ValueError("No solution. (Inconsistent system)")
    else:
        if determinant_x == determinant_y == 0:
            # Trivial solution (Inconsistent system)
            return (0.0, 0.0)
        else:
            x = determinant_x / determinant
            y = determinant_y / determinant
            # Non-Trivial Solution (Consistent system)
            return (x, y)
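A quick usage check of the repaired function, solving x + 2y = 7 and 2x - y = 4 (determinant = -5, determinant_x = -15, determinant_y = -10):

print(cramers_rule_2x2([1, 2, 7], [2, -1, 4]))  # (3.0, 2.0)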
| 225 |
import glob
import os
import random
from string import ascii_lowercase, digits
import cv2

# Input/output locations are left as empty placeholders, as in the original script.
LABEL_DIR = ""
IMAGE_DIR = ""
OUTPUT_DIR = ""
FLIP_TYPE = 1  # (0 is vertical, 1 is horizontal)


def main() -> None:
    img_paths, annos = get_dataset(LABEL_DIR, IMAGE_DIR)
    print("Processing...")
    new_images, new_annos, paths = update_image_and_anno(img_paths, annos, FLIP_TYPE)

    for index, image in enumerate(new_images):
        # Get random string code: '7b7ad245cdff75241935e4dd860f3bad'
        letter_code = random_chars(32)
        file_name = paths[index].split(os.sep)[-1].rsplit(".", 1)[0]
        file_root = f"{OUTPUT_DIR}/{file_name}_FLIP_{letter_code}"
        cv2.imwrite(f"{file_root}.jpg", image, [cv2.IMWRITE_JPEG_QUALITY, 85])
        print(f"Success {index + 1}/{len(new_images)} with {file_name}")
        annos_list = []
        for anno in new_annos[index]:
            obj = f"{anno[0]} {anno[1]} {anno[2]} {anno[3]} {anno[4]}"
            annos_list.append(obj)
        with open(f"{file_root}.txt", "w") as outfile:
            outfile.write("\n".join(line for line in annos_list))


def get_dataset(label_dir: str, img_dir: str) -> tuple[list, list]:
    img_paths = []
    labels = []
    for label_file in glob.glob(os.path.join(label_dir, "*.txt")):
        label_name = label_file.split(os.sep)[-1].rsplit(".", 1)[0]
        with open(label_file) as in_file:
            obj_lists = in_file.readlines()
        img_path = os.path.join(img_dir, f"{label_name}.jpg")

        boxes = []
        for obj_list in obj_lists:
            obj = obj_list.rstrip("\n").split(" ")
            boxes.append(
                [
                    int(obj[0]),
                    float(obj[1]),
                    float(obj[2]),
                    float(obj[3]),
                    float(obj[4]),
                ]
            )
        if not boxes:
            continue
        img_paths.append(img_path)
        labels.append(boxes)
    return img_paths, labels


def update_image_and_anno(img_list: list, anno_list: list, flip_type: int = 1) -> tuple[list, list, list]:
    new_annos_lists = []
    path_list = []
    new_imgs_list = []
    for idx in range(len(img_list)):
        new_annos = []
        path = img_list[idx]
        path_list.append(path)
        img_annos = anno_list[idx]
        img = cv2.imread(path)
        if flip_type == 1:
            new_img = cv2.flip(img, flip_type)
            for bbox in img_annos:
                x_center_new = 1 - bbox[1]
                new_annos.append([bbox[0], x_center_new, bbox[2], bbox[3], bbox[4]])
        elif flip_type == 0:
            new_img = cv2.flip(img, flip_type)
            for bbox in img_annos:
                y_center_new = 1 - bbox[2]
                new_annos.append([bbox[0], bbox[1], y_center_new, bbox[3], bbox[4]])
        new_annos_lists.append(new_annos)
        new_imgs_list.append(new_img)
    return new_imgs_list, new_annos_lists, path_list


def random_chars(number_char: int = 32) -> str:
    assert number_char > 1, "The number of character should be greater than 1"
    letter_code = ascii_lowercase + digits
    return "".join(random.choice(letter_code) for _ in range(number_char))
if __name__ == "__main__":
main()
print("""DONE ✅""")
| 225 | 1 |
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.bpe.model"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "camembert-base": "https://huggingface.co/camembert-base/resolve/main/sentencepiece.bpe.model",
    }
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "camembert-base": 512,
}

SPIECE_UNDERLINE = "▁"
class CamembertTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
def __init__( self , lowerCAmelCase__ , lowerCAmelCase__="<s>" , lowerCAmelCase__="</s>" , lowerCAmelCase__="</s>" , lowerCAmelCase__="<s>" , lowerCAmelCase__="<unk>" , lowerCAmelCase__="<pad>" , lowerCAmelCase__="<mask>" , lowerCAmelCase__=["<s>NOTUSED", "</s>NOTUSED"] , lowerCAmelCase__ = None , **lowerCAmelCase__ , ) -> None:
'''simple docstring'''
# Mask token behave like a normal word, i.e. include the space before it
lowercase__: Union[str, Any] = AddedToken(lowerCAmelCase__ , lstrip=lowerCAmelCase__ , rstrip=lowerCAmelCase__ ) if isinstance(lowerCAmelCase__ , lowerCAmelCase__ ) else mask_token
lowercase__: Dict = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
bos_token=lowerCAmelCase__ , eos_token=lowerCAmelCase__ , unk_token=lowerCAmelCase__ , sep_token=lowerCAmelCase__ , cls_token=lowerCAmelCase__ , pad_token=lowerCAmelCase__ , mask_token=lowerCAmelCase__ , additional_special_tokens=lowerCAmelCase__ , sp_model_kwargs=self.sp_model_kwargs , **lowerCAmelCase__ , )
lowercase__: Tuple = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(str(lowerCAmelCase__ ) )
lowercase__: int = vocab_file
# HACK: These tokens were added by fairseq but don't seem to be actually used when duplicated in the actual
# sentencepiece vocabulary (this is the case for <s> and </s>)
lowercase__: Optional[int] = {'<s>NOTUSED': 0, '<pad>': 1, '</s>NOTUSED': 2, '<unk>': 3}
lowercase__: Dict = len(self.fairseq_tokens_to_ids )
lowercase__: Any = len(self.sp_model ) + len(self.fairseq_tokens_to_ids )
lowercase__: Optional[Any] = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
def SCREAMING_SNAKE_CASE__ ( self , lowerCAmelCase__ , lowerCAmelCase__ = None ) -> List[int]:
'''simple docstring'''
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
lowercase__: List[Any] = [self.cls_token_id]
lowercase__: Dict = [self.sep_token_id]
return cls + token_ids_a + sep + sep + token_ids_a + sep
def SCREAMING_SNAKE_CASE__ ( self , lowerCAmelCase__ , lowerCAmelCase__ = None , lowerCAmelCase__ = False ) -> List[int]:
'''simple docstring'''
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=lowerCAmelCase__ , token_ids_a=lowerCAmelCase__ , already_has_special_tokens=lowerCAmelCase__ )
if token_ids_a is None:
return [1] + ([0] * len(lowerCAmelCase__ )) + [1]
return [1] + ([0] * len(lowerCAmelCase__ )) + [1, 1] + ([0] * len(lowerCAmelCase__ )) + [1]
def SCREAMING_SNAKE_CASE__ ( self , lowerCAmelCase__ , lowerCAmelCase__ = None ) -> List[int]:
'''simple docstring'''
lowercase__: List[str] = [self.sep_token_id]
lowercase__: Tuple = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
@property
def SCREAMING_SNAKE_CASE__ ( self ) -> List[str]:
'''simple docstring'''
return len(self.fairseq_tokens_to_ids ) + len(self.sp_model )
def SCREAMING_SNAKE_CASE__ ( self ) -> str:
'''simple docstring'''
lowercase__: Optional[int] = {self.convert_ids_to_tokens(lowerCAmelCase__ ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def SCREAMING_SNAKE_CASE__ ( self , lowerCAmelCase__ ) -> List[str]:
'''simple docstring'''
return self.sp_model.encode(lowerCAmelCase__ , out_type=lowerCAmelCase__ )
def SCREAMING_SNAKE_CASE__ ( self , lowerCAmelCase__ ) -> int:
'''simple docstring'''
if token in self.fairseq_tokens_to_ids:
return self.fairseq_tokens_to_ids[token]
elif self.sp_model.PieceToId(lowerCAmelCase__ ) == 0:
# Convert sentence piece unk token to fairseq unk token index
return self.unk_token_id
return self.fairseq_offset + self.sp_model.PieceToId(lowerCAmelCase__ )
def SCREAMING_SNAKE_CASE__ ( self , lowerCAmelCase__ ) -> Dict:
'''simple docstring'''
if index in self.fairseq_ids_to_tokens:
return self.fairseq_ids_to_tokens[index]
return self.sp_model.IdToPiece(index - self.fairseq_offset )
def SCREAMING_SNAKE_CASE__ ( self , lowerCAmelCase__ ) -> Any:
'''simple docstring'''
lowercase__: List[str] = []
lowercase__: Optional[int] = ''
lowercase__: Optional[Any] = False
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
if not prev_is_special:
out_string += " "
out_string += self.sp_model.decode(lowerCAmelCase__ ) + token
lowercase__: int = True
lowercase__: Tuple = []
else:
current_sub_tokens.append(lowerCAmelCase__ )
lowercase__: Optional[Any] = False
out_string += self.sp_model.decode(lowerCAmelCase__ )
return out_string.strip()
def __getstate__( self ) -> Optional[Any]:
'''simple docstring'''
lowercase__: Dict = self.__dict__.copy()
lowercase__: Dict = None
return state
def __setstate__( self , lowerCAmelCase__ ) -> Any:
'''simple docstring'''
lowercase__: str = d
# for backward compatibility
if not hasattr(self , 'sp_model_kwargs' ):
lowercase__: Optional[int] = {}
lowercase__: Dict = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
def SCREAMING_SNAKE_CASE__ ( self , lowerCAmelCase__ , lowerCAmelCase__ = None ) -> Tuple[str]:
'''simple docstring'''
if not os.path.isdir(lowerCAmelCase__ ):
logger.error(F'Vocabulary path ({save_directory}) should be a directory' )
return
lowercase__: List[str] = os.path.join(
lowerCAmelCase__ , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(lowerCAmelCase__ ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , lowerCAmelCase__ )
elif not os.path.isfile(self.vocab_file ):
with open(lowerCAmelCase__ , 'wb' ) as fi:
lowercase__: Dict = self.sp_model.serialized_model_proto()
fi.write(lowerCAmelCase__ )
return (out_vocab_file,)
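For orientation, the special-token layout this tokenizer produces (RoBERTa-style, per the build-inputs method above; the token strings below are the tokenizer's defaults):

# single sequence:    <s> X </s>
# pair of sequences:  <s> A </s></s> B </s>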
| 196 |
import argparse
import json
import os
import time
import zipfile
from get_ci_error_statistics import download_artifact, get_artifacts_links
from transformers import logging
logger = logging.get_logger(__name__)
def extract_warnings_from_single_artifact(artifact_path, targets):
    """Extract warnings from a downloaded artifact (in .zip format)."""
    selected_warnings = set()
    buffer = []

    def parse_line(fp):
        for line in fp:
            if isinstance(line, bytes):
                line = line.decode("UTF-8")
            if "warnings summary (final)" in line:
                continue
            # This means we are outside the body of a warning
            elif not line.startswith(" "):
                # process a single warning and move it to `selected_warnings`.
                if len(buffer) > 0:
                    warning = "\n".join(buffer)
                    # Only keep the warnings specified in `targets`
                    if any(f": {x}: " in warning for x in targets):
                        selected_warnings.add(warning)
                    buffer.clear()
                continue
            else:
                line = line.strip()
                buffer.append(line)

    if from_gh:
        for filename in os.listdir(artifact_path):
            file_path = os.path.join(artifact_path, filename)
            if not os.path.isdir(file_path):
                # read the file
                if filename != "warnings.txt":
                    continue
                with open(file_path) as fp:
                    parse_line(fp)
    else:
        try:
            with zipfile.ZipFile(artifact_path) as z:
                for filename in z.namelist():
                    if not os.path.isdir(filename):
                        # read the file
                        if filename != "warnings.txt":
                            continue
                        with z.open(filename) as fp:
                            parse_line(fp)
        except Exception:
            logger.warning(
                f"{artifact_path} is either an invalid zip file or something else wrong. This file is skipped."
            )

    return selected_warnings
def extract_warnings(artifact_dir, targets):
    """Extract warnings from all artifact files."""
    selected_warnings = set()
    paths = [os.path.join(artifact_dir, p) for p in os.listdir(artifact_dir) if (p.endswith(".zip") or from_gh)]
    for p in paths:
        selected_warnings.update(extract_warnings_from_single_artifact(p, targets))
    return selected_warnings
if __name__ == "__main__":
    def list_str(values):
        return values.split(",")
parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument('''--workflow_run_id''', type=str, required=True, help='''A GitHub Actions workflow run id.''')
parser.add_argument(
'''--output_dir''',
type=str,
required=True,
help='''Where to store the downloaded artifacts and other result files.''',
)
parser.add_argument('''--token''', default=None, type=str, help='''A token that has actions:read permission.''')
# optional parameters
parser.add_argument(
'''--targets''',
default='''DeprecationWarning,UserWarning,FutureWarning''',
type=list_str,
help='''Comma-separated list of target warning(s) which we want to extract.''',
)
parser.add_argument(
'''--from_gh''',
action='''store_true''',
help='''If running from a GitHub action workflow and collecting warnings from its artifacts.''',
)
args = parser.parse_args()
from_gh = args.from_gh
if from_gh:
# The artifacts have to be downloaded using `actions/download-artifact@v3`
pass
else:
os.makedirs(args.output_dir, exist_ok=True)
# get download links
artifacts = get_artifacts_links(args.workflow_run_id, token=args.token)
with open(os.path.join(args.output_dir, '''artifacts.json'''), '''w''', encoding='''UTF-8''') as fp:
json.dump(artifacts, fp, ensure_ascii=False, indent=4)
# download artifacts
for idx, (name, url) in enumerate(artifacts.items()):
print(name)
print(url)
print('''=''' * 80)
download_artifact(name, url, args.output_dir, args.token)
# Be gentle to GitHub
time.sleep(1)
# extract warnings from artifacts
selected_warnings = extract_warnings(args.output_dir, args.targets)
selected_warnings = sorted(selected_warnings)
with open(os.path.join(args.output_dir, '''selected_warnings.json'''), '''w''', encoding='''UTF-8''') as fp:
json.dump(selected_warnings, fp, ensure_ascii=False, indent=4)
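A hypothetical invocation of the script above (the file name, run id, and token are illustrative):

python extract_warnings.py --workflow_run_id 1234567890 --output_dir ./warnings_output --token ghp_xxx --targets DeprecationWarning,UserWarning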
| 196 | 1 |
"""simple docstring"""
from __future__ import annotations
from math import pi, sqrt
def resonant_frequency(inductance: float, capacitance: float) -> tuple:
'''simple docstring'''
if inductance <= 0:
raise ValueError("""Inductance cannot be 0 or negative""" )
elif capacitance <= 0:
raise ValueError("""Capacitance cannot be 0 or negative""" )
else:
return (
"Resonant frequency",
float(1 / (2 * pi * (sqrt(inductance * capacitance ))) ),
)
if __name__ == "__main__":
import doctest
doctest.testmod()
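A quick numeric check with assumed component values (L = 10 mH, C = 100 uF): f = 1 / (2*pi*sqrt(L*C)) = 1 / (2*pi*1e-3), roughly 159.15 Hz.

print(resonant_frequency(inductance=0.01, capacitance=0.0001))
# ('Resonant frequency', 159.15494309189535)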
| 350 |
"""simple docstring"""
def solution() -> int:
'''simple docstring'''
return [
a * b * (1000 - a - b)
for a in range(1 , 999 )
for b in range(a , 999 )
if (a * a + b * b == (1000 - a - b) ** 2)
][0]
if __name__ == "__main__":
print(F'{solution() = }')
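For reference, the only triplet satisfying both constraints is a = 200, b = 375, c = 425: 200^2 + 375^2 = 40000 + 140625 = 180625 = 425^2, and 200 + 375 + 425 = 1000, so solution() returns 200 * 375 * 425 = 31875000.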
| 248 | 0 |
# We ignore warnings about stepping the scheduler since we step it ourselves during gradient accumulation
import warnings
from .state import AcceleratorState, GradientState
warnings.filterwarnings("""ignore""", category=UserWarning, module="""torch.optim.lr_scheduler""")
class AcceleratedScheduler:
    def __init__(self, scheduler, optimizers, step_with_optimizer: bool = True, split_batches: bool = False):
        self.scheduler = scheduler
        self.optimizers = optimizers if isinstance(optimizers, (list, tuple)) else [optimizers]
        self.split_batches = split_batches
        self.step_with_optimizer = step_with_optimizer
        self.gradient_state = GradientState()

    def step(self, *args, **kwargs):
        if not self.step_with_optimizer:
            # No link between scheduler and optimizer -> just step
            self.scheduler.step(*args, **kwargs)
            return

        # Otherwise, first make sure the optimizer was stepped.
        if not self.gradient_state.sync_gradients:
            if self.gradient_state.adjust_scheduler:
                self.scheduler._step_count += 1
            return

        for opt in self.optimizers:
            if opt.step_was_skipped:
                return
        if self.split_batches:
            # Split batches -> the training dataloader batch size is not changed so one step per training step
            self.scheduler.step(*args, **kwargs)
        else:
            # Otherwise the training dataloader batch size was multiplied by `num_processes`, so we need to do
            # num_processes steps per training step
            num_processes = AcceleratorState().num_processes
            for _ in range(num_processes):
                # Special case when using OneCycle and `drop_last` was not used
                if hasattr(self.scheduler, "total_steps"):
                    if self.scheduler._step_count <= self.scheduler.total_steps:
                        self.scheduler.step(*args, **kwargs)
                else:
                    self.scheduler.step(*args, **kwargs)

    # Passthroughs to the wrapped scheduler
    def get_last_lr(self):
        return self.scheduler.get_last_lr()

    def state_dict(self):
        return self.scheduler.state_dict()

    def load_state_dict(self, state_dict):
        self.scheduler.load_state_dict(state_dict)

    def get_lr(self):
        return self.scheduler.get_lr()

    def print_lr(self, *args, **kwargs):
        return self.scheduler.print_lr(*args, **kwargs)
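A hedged usage sketch: in practice accelerate builds this wrapper for you via accelerator.prepare(...), and the wrapped optimizers expose step_was_skipped, which plain torch optimizers do not. The model and hyperparameters below are illustrative:

import torch
from accelerate import Accelerator

accelerator = Accelerator()
model = torch.nn.Linear(4, 2)
optimizer = torch.optim.SGD(model.parameters(), lr=0.1)
scheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=10)
model, optimizer, scheduler = accelerator.prepare(model, optimizer, scheduler)

# `scheduler` is now an AcceleratedScheduler: stepping it under gradient
# accumulation is safe because skipped optimizer steps are detected.
scheduler.step()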
| 307 |
from typing import TYPE_CHECKING
from ..utils import _LazyModule
_import_structure = {
"""config""": [
"""EXTERNAL_DATA_FORMAT_SIZE_LIMIT""",
"""OnnxConfig""",
"""OnnxConfigWithPast""",
"""OnnxSeq2SeqConfigWithPast""",
"""PatchingSpec""",
],
"""convert""": ["""export""", """validate_model_outputs"""],
"""features""": ["""FeaturesManager"""],
"""utils""": ["""ParameterFormat""", """compute_serialized_parameters_size"""],
}
if TYPE_CHECKING:
from .config import (
EXTERNAL_DATA_FORMAT_SIZE_LIMIT,
OnnxConfig,
OnnxConfigWithPast,
OnnxSeq2SeqConfigWithPast,
PatchingSpec,
)
from .convert import export, validate_model_outputs
from .features import FeaturesManager
from .utils import ParameterFormat, compute_serialized_parameters_size
else:
import sys
sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 307 | 1 |
import torch
import torch.nn as nn
from transformers import CLIPConfig, CLIPVisionModel, PreTrainedModel
from ...utils import logging
UpperCamelCase__ =logging.get_logger(__name__)
def cosine_distance(image_embeds, text_embeds):
    normalized_image_embeds = nn.functional.normalize(image_embeds)
    normalized_text_embeds = nn.functional.normalize(text_embeds)
    return torch.mm(normalized_image_embeds, normalized_text_embeds.t())
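cosine_distance is simply a cosine-similarity matrix: both inputs are L2-normalized row-wise, after which the matrix product of unit vectors yields cos(u, v) = u . v for every pair. A tiny check with illustrative values:

a = torch.tensor([[3.0, 4.0]])  # normalizes to [0.6, 0.8]
b = torch.tensor([[6.0, 8.0]])  # same direction
print(cosine_distance(a, b))    # tensor([[1.0000]])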
class lowerCAmelCase__( a__ ):
__snake_case = CLIPConfig
__snake_case = ["CLIPEncoderLayer"]
def __init__( self , __lowerCamelCase ) -> Union[str, Any]:
super().__init__(SCREAMING_SNAKE_CASE_ )
_SCREAMING_SNAKE_CASE : Any = CLIPVisionModel(config.vision_config )
_SCREAMING_SNAKE_CASE : Any = nn.Linear(config.vision_config.hidden_size , config.projection_dim , bias=SCREAMING_SNAKE_CASE_ )
_SCREAMING_SNAKE_CASE : Dict = nn.Parameter(torch.ones(1_7 , config.projection_dim ) , requires_grad=SCREAMING_SNAKE_CASE_ )
_SCREAMING_SNAKE_CASE : Tuple = nn.Parameter(torch.ones(3 , config.projection_dim ) , requires_grad=SCREAMING_SNAKE_CASE_ )
_SCREAMING_SNAKE_CASE : Any = nn.Parameter(torch.ones(1_7 ) , requires_grad=SCREAMING_SNAKE_CASE_ )
_SCREAMING_SNAKE_CASE : str = nn.Parameter(torch.ones(3 ) , requires_grad=SCREAMING_SNAKE_CASE_ )
@torch.no_grad()
def UpperCamelCase_ ( self , __lowerCamelCase , __lowerCamelCase ) -> Any:
_SCREAMING_SNAKE_CASE : Optional[int] = self.vision_model(SCREAMING_SNAKE_CASE_ )[1] # pooled_output
_SCREAMING_SNAKE_CASE : int = self.visual_projection(SCREAMING_SNAKE_CASE_ )
# we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
_SCREAMING_SNAKE_CASE : Union[str, Any] = cosine_distance(SCREAMING_SNAKE_CASE_ , self.special_care_embeds ).cpu().float().numpy()
_SCREAMING_SNAKE_CASE : List[str] = cosine_distance(SCREAMING_SNAKE_CASE_ , self.concept_embeds ).cpu().float().numpy()
_SCREAMING_SNAKE_CASE : str = []
_SCREAMING_SNAKE_CASE : Dict = image_embeds.shape[0]
for i in range(SCREAMING_SNAKE_CASE_ ):
_SCREAMING_SNAKE_CASE : Any = {'special_scores': {}, 'special_care': [], 'concept_scores': {}, 'bad_concepts': []}
# increase this value to create a stronger `nsfw` filter
# at the cost of increasing the possibility of filtering benign images
_SCREAMING_SNAKE_CASE : int = 0.0
for concept_idx in range(len(special_cos_dist[0] ) ):
_SCREAMING_SNAKE_CASE : Optional[Any] = special_cos_dist[i][concept_idx]
_SCREAMING_SNAKE_CASE : int = self.special_care_embeds_weights[concept_idx].item()
_SCREAMING_SNAKE_CASE : str = round(concept_cos - concept_threshold + adjustment , 3 )
if result_img["special_scores"][concept_idx] > 0:
result_img["special_care"].append({concept_idx, result_img["special_scores"][concept_idx]} )
_SCREAMING_SNAKE_CASE : Dict = 0.01
for concept_idx in range(len(cos_dist[0] ) ):
_SCREAMING_SNAKE_CASE : int = cos_dist[i][concept_idx]
_SCREAMING_SNAKE_CASE : Optional[Any] = self.concept_embeds_weights[concept_idx].item()
_SCREAMING_SNAKE_CASE : Optional[int] = round(concept_cos - concept_threshold + adjustment , 3 )
if result_img["concept_scores"][concept_idx] > 0:
result_img["bad_concepts"].append(SCREAMING_SNAKE_CASE_ )
result.append(SCREAMING_SNAKE_CASE_ )
_SCREAMING_SNAKE_CASE : Optional[Any] = [len(res["bad_concepts"] ) > 0 for res in result]
return images, has_nsfw_concepts
@torch.no_grad()
def UpperCamelCase_ ( self , __lowerCamelCase , __lowerCamelCase ) -> Tuple:
_SCREAMING_SNAKE_CASE : Union[str, Any] = self.vision_model(SCREAMING_SNAKE_CASE_ )[1] # pooled_output
_SCREAMING_SNAKE_CASE : str = self.visual_projection(SCREAMING_SNAKE_CASE_ )
_SCREAMING_SNAKE_CASE : Dict = cosine_distance(SCREAMING_SNAKE_CASE_ , self.special_care_embeds )
_SCREAMING_SNAKE_CASE : Union[str, Any] = cosine_distance(SCREAMING_SNAKE_CASE_ , self.concept_embeds )
# increase this value to create a stronger `nsfw` filter
# at the cost of increasing the possibility of filtering benign images
_SCREAMING_SNAKE_CASE : Dict = 0.0
_SCREAMING_SNAKE_CASE : Optional[Any] = special_cos_dist - self.special_care_embeds_weights + adjustment
# special_scores = special_scores.round(decimals=3)
_SCREAMING_SNAKE_CASE : List[str] = torch.any(special_scores > 0 , dim=1 )
_SCREAMING_SNAKE_CASE : List[Any] = special_care * 0.01
_SCREAMING_SNAKE_CASE : Union[str, Any] = special_adjustment.unsqueeze(1 ).expand(-1 , cos_dist.shape[1] )
_SCREAMING_SNAKE_CASE : List[str] = (cos_dist - self.concept_embeds_weights) + special_adjustment
# concept_scores = concept_scores.round(decimals=3)
_SCREAMING_SNAKE_CASE : Union[str, Any] = torch.any(concept_scores > 0 , dim=1 )
        return images, has_nsfw_concepts
| 365 |
import os
from typing import List, Optional, Union
from ...image_processing_utils import BatchFeature
from ...image_utils import ImageInput
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
from ..auto import AutoTokenizer
class lowerCAmelCase__( __lowercase ):
'''simple docstring'''
__snake_case = ['image_processor', 'tokenizer']
__snake_case = 'BlipImageProcessor'
__snake_case = 'AutoTokenizer'
def __init__( self , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase ) -> Optional[Any]:
super().__init__(__lowerCamelCase , __lowerCamelCase )
# add QFormer tokenizer
_SCREAMING_SNAKE_CASE : List[str] = qformer_tokenizer
def __call__( self , __lowerCamelCase = None , __lowerCamelCase = None , __lowerCamelCase = True , __lowerCamelCase = False , __lowerCamelCase = None , __lowerCamelCase = None , __lowerCamelCase = 0 , __lowerCamelCase = None , __lowerCamelCase = None , __lowerCamelCase = False , __lowerCamelCase = False , __lowerCamelCase = False , __lowerCamelCase = False , __lowerCamelCase = False , __lowerCamelCase = True , __lowerCamelCase = None , **__lowerCamelCase , ) -> BatchFeature:
if images is None and text is None:
raise ValueError("You have to specify at least images or text." )
_SCREAMING_SNAKE_CASE : Any = BatchFeature()
if text is not None:
_SCREAMING_SNAKE_CASE : List[Any] = self.tokenizer(
text=__lowerCamelCase , add_special_tokens=__lowerCamelCase , padding=__lowerCamelCase , truncation=__lowerCamelCase , max_length=__lowerCamelCase , stride=__lowerCamelCase , pad_to_multiple_of=__lowerCamelCase , return_attention_mask=__lowerCamelCase , return_overflowing_tokens=__lowerCamelCase , return_special_tokens_mask=__lowerCamelCase , return_offsets_mapping=__lowerCamelCase , return_token_type_ids=__lowerCamelCase , return_length=__lowerCamelCase , verbose=__lowerCamelCase , return_tensors=__lowerCamelCase , **__lowerCamelCase , )
encoding.update(__lowerCamelCase )
_SCREAMING_SNAKE_CASE : List[str] = self.qformer_tokenizer(
text=__lowerCamelCase , add_special_tokens=__lowerCamelCase , padding=__lowerCamelCase , truncation=__lowerCamelCase , max_length=__lowerCamelCase , stride=__lowerCamelCase , pad_to_multiple_of=__lowerCamelCase , return_attention_mask=__lowerCamelCase , return_overflowing_tokens=__lowerCamelCase , return_special_tokens_mask=__lowerCamelCase , return_offsets_mapping=__lowerCamelCase , return_token_type_ids=__lowerCamelCase , return_length=__lowerCamelCase , verbose=__lowerCamelCase , return_tensors=__lowerCamelCase , **__lowerCamelCase , )
_SCREAMING_SNAKE_CASE : str = qformer_text_encoding.pop("input_ids" )
_SCREAMING_SNAKE_CASE : List[Any] = qformer_text_encoding.pop("attention_mask" )
if images is not None:
_SCREAMING_SNAKE_CASE : Optional[int] = self.image_processor(__lowerCamelCase , return_tensors=__lowerCamelCase )
encoding.update(__lowerCamelCase )
return encoding
def UpperCamelCase_ ( self , *__lowerCamelCase , **__lowerCamelCase ) -> Union[str, Any]:
return self.tokenizer.batch_decode(*__lowerCamelCase , **__lowerCamelCase )
def UpperCamelCase_ ( self , *__lowerCamelCase , **__lowerCamelCase ) -> str:
return self.tokenizer.decode(*__lowerCamelCase , **__lowerCamelCase )
@property
# Copied from transformers.models.blip.processing_blip.BlipProcessor.model_input_names
def UpperCamelCase_ ( self ) -> str:
_SCREAMING_SNAKE_CASE : List[Any] = self.tokenizer.model_input_names
_SCREAMING_SNAKE_CASE : Union[str, Any] = self.image_processor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
def UpperCamelCase_ ( self , __lowerCamelCase , **__lowerCamelCase ) -> Any:
if os.path.isfile(__lowerCamelCase ):
raise ValueError(F"""Provided path ({save_directory}) should be a directory, not a file""" )
os.makedirs(__lowerCamelCase , exist_ok=__lowerCamelCase )
_SCREAMING_SNAKE_CASE : Any = os.path.join(__lowerCamelCase , "qformer_tokenizer" )
self.qformer_tokenizer.save_pretrained(__lowerCamelCase )
return super().save_pretrained(__lowerCamelCase , **__lowerCamelCase )
@classmethod
def UpperCamelCase_ ( cls , __lowerCamelCase , **__lowerCamelCase ) -> Optional[Any]:
_SCREAMING_SNAKE_CASE : List[Any] = AutoTokenizer.from_pretrained(__lowerCamelCase , subfolder="qformer_tokenizer" )
_SCREAMING_SNAKE_CASE : Optional[Any] = cls._get_arguments_from_pretrained(__lowerCamelCase , **__lowerCamelCase )
args.append(__lowerCamelCase )
        return cls(*__lowerCamelCase )
| 325 | 0 |
from __future__ import annotations
import copy
import tempfile
import unittest
from transformers import CONFIG_MAPPING, AutoConfig, BertConfig, GPTaConfig, TaConfig, TapasConfig, is_tf_available
from transformers.testing_utils import (
DUMMY_UNKNOWN_IDENTIFIER,
SMALL_MODEL_IDENTIFIER,
RequestCounter,
require_tensorflow_probability,
require_tf,
slow,
)
from ..bert.test_modeling_bert import BertModelTester
if is_tf_available():
from transformers import (
TFAutoModel,
TFAutoModelForCausalLM,
TFAutoModelForMaskedLM,
TFAutoModelForPreTraining,
TFAutoModelForQuestionAnswering,
TFAutoModelForSeqaSeqLM,
TFAutoModelForSequenceClassification,
TFAutoModelForTableQuestionAnswering,
TFAutoModelForTokenClassification,
TFAutoModelWithLMHead,
TFBertForMaskedLM,
TFBertForPreTraining,
TFBertForQuestionAnswering,
TFBertForSequenceClassification,
TFBertModel,
TFFunnelBaseModel,
TFFunnelModel,
TFGPTaLMHeadModel,
TFRobertaForMaskedLM,
TFTaForConditionalGeneration,
TFTapasForQuestionAnswering,
)
from transformers.models.auto.modeling_tf_auto import (
TF_MODEL_FOR_CAUSAL_LM_MAPPING,
TF_MODEL_FOR_MASKED_LM_MAPPING,
TF_MODEL_FOR_PRETRAINING_MAPPING,
TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING,
TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING,
TF_MODEL_MAPPING,
)
from transformers.models.bert.modeling_tf_bert import TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST
    from transformers.models.gpt2.modeling_tf_gpt2 import TF_GPT2_PRETRAINED_MODEL_ARCHIVE_LIST
    from transformers.models.t5.modeling_tf_t5 import TF_T5_PRETRAINED_MODEL_ARCHIVE_LIST
from transformers.models.tapas.modeling_tf_tapas import TF_TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST
class NewModelConfig(BertConfig):
    model_type = "new-model"


if is_tf_available():

    class TFNewModel(TFBertModel):
        config_class = NewModelConfig
@require_tf
class _lowerCAmelCase ( unittest.TestCase ):
@slow
def __a ( self ) -> Union[str, Any]:
lowerCAmelCase_ = "bert-base-cased"
lowerCAmelCase_ = AutoConfig.from_pretrained(_UpperCamelCase )
self.assertIsNotNone(_UpperCamelCase )
self.assertIsInstance(_UpperCamelCase , _UpperCamelCase )
lowerCAmelCase_ = TFAutoModel.from_pretrained(_UpperCamelCase )
self.assertIsNotNone(_UpperCamelCase )
self.assertIsInstance(_UpperCamelCase , _UpperCamelCase )
@slow
def __a ( self ) -> List[str]:
lowerCAmelCase_ = "bert-base-cased"
lowerCAmelCase_ = AutoConfig.from_pretrained(_UpperCamelCase )
self.assertIsNotNone(_UpperCamelCase )
self.assertIsInstance(_UpperCamelCase , _UpperCamelCase )
lowerCAmelCase_ = TFAutoModelForPreTraining.from_pretrained(_UpperCamelCase )
self.assertIsNotNone(_UpperCamelCase )
self.assertIsInstance(_UpperCamelCase , _UpperCamelCase )
@slow
def __a ( self ) -> Optional[Any]:
for model_name in TF_GPT2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowerCAmelCase_ = AutoConfig.from_pretrained(_UpperCamelCase )
self.assertIsNotNone(_UpperCamelCase )
self.assertIsInstance(_UpperCamelCase , _UpperCamelCase )
lowerCAmelCase_ = TFAutoModelForCausalLM.from_pretrained(_UpperCamelCase )
lowerCAmelCase_ , lowerCAmelCase_ = TFAutoModelForCausalLM.from_pretrained(_UpperCamelCase , output_loading_info=_UpperCamelCase )
self.assertIsNotNone(_UpperCamelCase )
self.assertIsInstance(_UpperCamelCase , _UpperCamelCase )
@slow
def __a ( self ) -> str:
for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowerCAmelCase_ = AutoConfig.from_pretrained(_UpperCamelCase )
self.assertIsNotNone(_UpperCamelCase )
self.assertIsInstance(_UpperCamelCase , _UpperCamelCase )
lowerCAmelCase_ = TFAutoModelWithLMHead.from_pretrained(_UpperCamelCase )
self.assertIsNotNone(_UpperCamelCase )
self.assertIsInstance(_UpperCamelCase , _UpperCamelCase )
@slow
def __a ( self ) -> Any:
for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowerCAmelCase_ = AutoConfig.from_pretrained(_UpperCamelCase )
self.assertIsNotNone(_UpperCamelCase )
self.assertIsInstance(_UpperCamelCase , _UpperCamelCase )
lowerCAmelCase_ = TFAutoModelForMaskedLM.from_pretrained(_UpperCamelCase )
lowerCAmelCase_ , lowerCAmelCase_ = TFAutoModelForMaskedLM.from_pretrained(_UpperCamelCase , output_loading_info=_UpperCamelCase )
self.assertIsNotNone(_UpperCamelCase )
self.assertIsInstance(_UpperCamelCase , _UpperCamelCase )
@slow
def __a ( self ) -> int:
for model_name in TF_T5_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowerCAmelCase_ = AutoConfig.from_pretrained(_UpperCamelCase )
self.assertIsNotNone(_UpperCamelCase )
self.assertIsInstance(_UpperCamelCase , _UpperCamelCase )
lowerCAmelCase_ = TFAutoModelForSeqaSeqLM.from_pretrained(_UpperCamelCase )
lowerCAmelCase_ , lowerCAmelCase_ = TFAutoModelForSeqaSeqLM.from_pretrained(_UpperCamelCase , output_loading_info=_UpperCamelCase )
self.assertIsNotNone(_UpperCamelCase )
self.assertIsInstance(_UpperCamelCase , _UpperCamelCase )
@slow
def __a ( self ) -> Optional[Any]:
# for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
for model_name in ["bert-base-uncased"]:
lowerCAmelCase_ = AutoConfig.from_pretrained(_UpperCamelCase )
self.assertIsNotNone(_UpperCamelCase )
self.assertIsInstance(_UpperCamelCase , _UpperCamelCase )
lowerCAmelCase_ = TFAutoModelForSequenceClassification.from_pretrained(_UpperCamelCase )
self.assertIsNotNone(_UpperCamelCase )
self.assertIsInstance(_UpperCamelCase , _UpperCamelCase )
@slow
def __a ( self ) -> str:
# for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
for model_name in ["bert-base-uncased"]:
lowerCAmelCase_ = AutoConfig.from_pretrained(_UpperCamelCase )
self.assertIsNotNone(_UpperCamelCase )
self.assertIsInstance(_UpperCamelCase , _UpperCamelCase )
lowerCAmelCase_ = TFAutoModelForQuestionAnswering.from_pretrained(_UpperCamelCase )
self.assertIsNotNone(_UpperCamelCase )
self.assertIsInstance(_UpperCamelCase , _UpperCamelCase )
@slow
@require_tensorflow_probability
def __a ( self ) -> Tuple:
for model_name in TF_TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST[5:6]:
lowerCAmelCase_ = AutoConfig.from_pretrained(_UpperCamelCase )
self.assertIsNotNone(_UpperCamelCase )
self.assertIsInstance(_UpperCamelCase , _UpperCamelCase )
lowerCAmelCase_ = TFAutoModelForTableQuestionAnswering.from_pretrained(_UpperCamelCase )
lowerCAmelCase_ , lowerCAmelCase_ = TFAutoModelForTableQuestionAnswering.from_pretrained(
_UpperCamelCase , output_loading_info=_UpperCamelCase )
self.assertIsNotNone(_UpperCamelCase )
self.assertIsInstance(_UpperCamelCase , _UpperCamelCase )
def __a ( self ) -> Optional[Any]:
        model = TFAutoModelWithLMHead.from_pretrained(SMALL_MODEL_IDENTIFIER)
        self.assertIsInstance(model, TFBertForMaskedLM)
        self.assertEqual(model.num_parameters(), 14_410)
        self.assertEqual(model.num_parameters(only_trainable=True), 14_410)
def __a ( self ) -> Dict:
        model = TFAutoModelWithLMHead.from_pretrained(DUMMY_UNKNOWN_IDENTIFIER)
        self.assertIsInstance(model, TFRobertaForMaskedLM)
        self.assertEqual(model.num_parameters(), 14_410)
        self.assertEqual(model.num_parameters(only_trainable=True), 14_410)
def __a ( self ) -> str:
# For the auto model mapping, FunnelConfig has two models: FunnelModel and FunnelBaseModel
lowerCAmelCase_ = TFAutoModel.from_pretrained("sgugger/funnel-random-tiny" )
self.assertIsInstance(_UpperCamelCase , _UpperCamelCase )
lowerCAmelCase_ = copy.deepcopy(model.config )
lowerCAmelCase_ = ["FunnelBaseModel"]
lowerCAmelCase_ = TFAutoModel.from_config(_UpperCamelCase )
self.assertIsInstance(_UpperCamelCase , _UpperCamelCase )
with tempfile.TemporaryDirectory() as tmp_dir:
model.save_pretrained(_UpperCamelCase )
lowerCAmelCase_ = TFAutoModel.from_pretrained(_UpperCamelCase )
self.assertIsInstance(_UpperCamelCase , _UpperCamelCase )
def __a ( self ) -> str:
try:
AutoConfig.register("new-model" , NewModelConfig )
lowerCAmelCase_ = [
TFAutoModel,
TFAutoModelForCausalLM,
TFAutoModelForMaskedLM,
TFAutoModelForPreTraining,
TFAutoModelForQuestionAnswering,
TFAutoModelForSequenceClassification,
TFAutoModelForTokenClassification,
]
for auto_class in auto_classes:
with self.subTest(auto_class.__name__ ):
# Wrong config class will raise an error
with self.assertRaises(_UpperCamelCase ):
auto_class.register(_UpperCamelCase , _UpperCamelCase )
auto_class.register(_UpperCamelCase , _UpperCamelCase )
# Trying to register something existing in the Transformers library will raise an error
with self.assertRaises(_UpperCamelCase ):
auto_class.register(_UpperCamelCase , _UpperCamelCase )
# Now that the config is registered, it can be used as any other config with the auto-API
lowerCAmelCase_ = BertModelTester(self ).get_config()
lowerCAmelCase_ = NewModelConfig(**tiny_config.to_dict() )
lowerCAmelCase_ = auto_class.from_config(_UpperCamelCase )
self.assertIsInstance(_UpperCamelCase , _UpperCamelCase )
with tempfile.TemporaryDirectory() as tmp_dir:
model.save_pretrained(_UpperCamelCase )
lowerCAmelCase_ = auto_class.from_pretrained(_UpperCamelCase )
self.assertIsInstance(_UpperCamelCase , _UpperCamelCase )
finally:
if "new-model" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["new-model"]
for mapping in (
TF_MODEL_MAPPING,
TF_MODEL_FOR_PRETRAINING_MAPPING,
TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING,
TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING,
TF_MODEL_FOR_CAUSAL_LM_MAPPING,
TF_MODEL_FOR_MASKED_LM_MAPPING,
):
if NewModelConfig in mapping._extra_content:
del mapping._extra_content[NewModelConfig]
def __a ( self ) -> Optional[Any]:
with self.assertRaisesRegex(
_UpperCamelCase , "bert-base is not a local folder and is not a valid model identifier" ):
lowerCAmelCase_ = TFAutoModel.from_pretrained("bert-base" )
def __a ( self ) -> Any:
with self.assertRaisesRegex(
_UpperCamelCase , r"aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)" ):
lowerCAmelCase_ = TFAutoModel.from_pretrained(_UpperCamelCase , revision="aaaaaa" )
def __a ( self ) -> List[str]:
with self.assertRaisesRegex(
_UpperCamelCase , "hf-internal-testing/config-no-model does not appear to have a file named pytorch_model.bin" , ):
lowerCAmelCase_ = TFAutoModel.from_pretrained("hf-internal-testing/config-no-model" )
def __a ( self ) -> List[Any]:
with self.assertRaisesRegex(_UpperCamelCase , "Use `from_pt=True` to load this model" ):
lowerCAmelCase_ = TFAutoModel.from_pretrained("hf-internal-testing/tiny-bert-pt-only" )
def __a ( self ) -> Optional[int]:
# Make sure we have cached the model.
lowerCAmelCase_ = TFAutoModel.from_pretrained("hf-internal-testing/tiny-random-bert" )
with RequestCounter() as counter:
lowerCAmelCase_ = TFAutoModel.from_pretrained("hf-internal-testing/tiny-random-bert" )
self.assertEqual(counter.get_request_count , 0 )
self.assertEqual(counter.head_request_count , 1 )
self.assertEqual(counter.other_request_count , 0 )
# With a sharded checkpoint
lowerCAmelCase_ = TFAutoModel.from_pretrained("ArthurZ/tiny-random-bert-sharded" )
with RequestCounter() as counter:
lowerCAmelCase_ = TFAutoModel.from_pretrained("ArthurZ/tiny-random-bert-sharded" )
self.assertEqual(counter.get_request_count , 0 )
self.assertEqual(counter.head_request_count , 1 )
self.assertEqual(counter.other_request_count , 0 )
| 231 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_speech_available,
is_tf_available,
is_torch_available,
)
_import_structure = {
"configuration_speech_to_text": ["SPEECH_TO_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP", "Speech2TextConfig"],
"processing_speech_to_text": ["Speech2TextProcessor"],
}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_A = ["Speech2TextTokenizer"]
try:
if not is_speech_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_A = ["Speech2TextFeatureExtractor"]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_speech_to_text"] = [
"TF_SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFSpeech2TextForConditionalGeneration",
"TFSpeech2TextModel",
"TFSpeech2TextPreTrainedModel",
]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_speech_to_text"] = [
"SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST",
"Speech2TextForConditionalGeneration",
"Speech2TextModel",
"Speech2TextPreTrainedModel",
]
if TYPE_CHECKING:
    from .configuration_speech_to_text import SPEECH_TO_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP, Speech2TextConfig
    from .processing_speech_to_text import Speech2TextProcessor

    try:
        if not is_sentencepiece_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_speech_to_text import Speech2TextTokenizer

    try:
        if not is_speech_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_speech_to_text import Speech2TextFeatureExtractor

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_speech_to_text import (
            TF_SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFSpeech2TextForConditionalGeneration,
            TFSpeech2TextModel,
            TFSpeech2TextPreTrainedModel,
        )

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_speech_to_text import (
            SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
            Speech2TextForConditionalGeneration,
            Speech2TextModel,
            Speech2TextPreTrainedModel,
        )
else:
import sys
sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 231 | 1 |
"""simple docstring"""
from __future__ import annotations
def p_series(nth_term: int | float | str, power: int | float | str) -> list[str]:
    """
    Return the P-series 1 + 1/2^p + 1/3^p ... 1/n^p as a list of term strings.
    """
    if nth_term == "":
        return [""]
    nth_term = int(nth_term)
    power = int(power)
    series: list[str] = []
    for temp in range(int(nth_term)):
        series.append(f"1 / {pow(temp + 1, int(power))}" if series else "1")
    return series
if __name__ == "__main__":
import doctest
doctest.testmod()
nth_term = int(input("""Enter the last number (nth term) of the P-Series"""))
power = int(input("""Enter the power for P-Series"""))
print("""Formula of P-Series => 1+1/2^p+1/3^p ..... 1/n^p""")
print(p_series(nth_term, power))
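Example: p_series(5, 2) returns ['1', '1 / 4', '1 / 9', '1 / 16', '1 / 25']; the first term is rendered as a bare '1' by the conditional in the loop.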
| 356 |
import colorsys
from PIL import Image # type: ignore
def get_distance(x: float, y: float, max_step: int) -> float:
    """Relative escape step of the point (x, y); members of the set return 1."""
    a = x
    b = y
    for step in range(max_step):  # noqa: B007
        a_new = a * a - b * b + x
        b = 2 * a * b + y
        a = a_new
        # divergence happens for all complex number with an absolute value
        # greater than 4
        if a * a + b * b > 4:
            break
    return step / (max_step - 1)


def get_black_and_white_rgb(distance: float) -> tuple:
    """Black for members of the Mandelbrot set (distance == 1), white otherwise."""
    if distance == 1:
        return (0, 0, 0)
    else:
        return (255, 255, 255)


def get_color_coded_rgb(distance: float) -> tuple:
    """Black for members of the Mandelbrot set, hue-coded by distance otherwise."""
    if distance == 1:
        return (0, 0, 0)
    else:
        return tuple(round(i * 255) for i in colorsys.hsv_to_rgb(distance, 1, 1))


def get_image(
    image_width: int = 800,
    image_height: int = 600,
    figure_center_x: float = -0.6,
    figure_center_y: float = 0,
    figure_width: float = 3.2,
    max_step: int = 50,
    use_distance_color_coding: bool = True,
) -> Image.Image:
    img = Image.new("RGB", (image_width, image_height))
    pixels = img.load()

    # loop through the image-coordinates
    for image_x in range(image_width):
        for image_y in range(image_height):
            # determine the figure-coordinates based on the image-coordinates
            figure_height = figure_width / image_width * image_height
            real_x = figure_center_x + (image_x / image_width - 0.5) * figure_width
            real_y = figure_center_y + (image_y / image_height - 0.5) * figure_height
            distance = get_distance(real_x, real_y, max_step)

            # color the corresponding pixel based on the selected coloring-function
            if use_distance_color_coding:
                pixels[image_x, image_y] = get_color_coded_rgb(distance)
            else:
                pixels[image_x, image_y] = get_black_and_white_rgb(distance)
    return img
if __name__ == "__main__":
import doctest
doctest.testmod()
# colored version, full figure
img = get_image()
# uncomment for colored version, different section, zoomed in
# img = get_image(figure_center_x = -0.6, figure_center_y = -0.4,
# figure_width = 0.8)
# uncomment for black and white version, full figure
# img = get_image(use_distance_color_coding = False)
# uncomment to save the image
# img.save("mandelbrot.png")
img.show()
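For clarity, the escape-time loop in get_distance iterates z -> z^2 + c; a close analogue of the repaired function written with Python's complex type (illustrative, not part of the original script):

def escape_step(c: complex, max_step: int) -> float:
    z = c
    step = 0
    for step in range(max_step):
        z = z * z + c
        if abs(z) > 2:  # |z|^2 > 4 guarantees divergence
            break
    return step / (max_step - 1)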
| 122 | 0 |
'''simple docstring'''
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import BeitConfig, BeitForImageClassification, BeitForMaskedImageModeling, BeitImageProcessor
from transformers.image_utils import PILImageResampling
from transformers.utils import logging
logging.set_verbosity_info()
SCREAMING_SNAKE_CASE__ = logging.get_logger(__name__)
def create_rename_keys(config, has_lm_head=False, is_semantic=False):
    prefix = "backbone." if is_semantic else ""

    rename_keys = []
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((F"{prefix}blocks.{i}.norm1.weight", F"beit.encoder.layer.{i}.layernorm_before.weight") )
rename_keys.append((F"{prefix}blocks.{i}.norm1.bias", F"beit.encoder.layer.{i}.layernorm_before.bias") )
rename_keys.append(
(F"{prefix}blocks.{i}.attn.proj.weight", F"beit.encoder.layer.{i}.attention.output.dense.weight") )
rename_keys.append(
(F"{prefix}blocks.{i}.attn.proj.bias", F"beit.encoder.layer.{i}.attention.output.dense.bias") )
rename_keys.append((F"{prefix}blocks.{i}.norm2.weight", F"beit.encoder.layer.{i}.layernorm_after.weight") )
rename_keys.append((F"{prefix}blocks.{i}.norm2.bias", F"beit.encoder.layer.{i}.layernorm_after.bias") )
rename_keys.append((F"{prefix}blocks.{i}.mlp.fc1.weight", F"beit.encoder.layer.{i}.intermediate.dense.weight") )
rename_keys.append((F"{prefix}blocks.{i}.mlp.fc1.bias", F"beit.encoder.layer.{i}.intermediate.dense.bias") )
rename_keys.append((F"{prefix}blocks.{i}.mlp.fc2.weight", F"beit.encoder.layer.{i}.output.dense.weight") )
rename_keys.append((F"{prefix}blocks.{i}.mlp.fc2.bias", F"beit.encoder.layer.{i}.output.dense.bias") )
# projection layer + position embeddings
rename_keys.extend(
[
(F"{prefix}cls_token", """beit.embeddings.cls_token"""),
(F"{prefix}patch_embed.proj.weight", """beit.embeddings.patch_embeddings.projection.weight"""),
(F"{prefix}patch_embed.proj.bias", """beit.embeddings.patch_embeddings.projection.bias"""),
(F"{prefix}pos_embed", """beit.embeddings.position_embeddings"""),
] )
if has_lm_head:
# mask token + layernorm
rename_keys.extend(
[
("""mask_token""", """beit.embeddings.mask_token"""),
("""norm.weight""", """layernorm.weight"""),
("""norm.bias""", """layernorm.bias"""),
] )
else:
# layernorm + classification head
rename_keys.extend(
[
("""fc_norm.weight""", """beit.pooler.layernorm.weight"""),
("""fc_norm.bias""", """beit.pooler.layernorm.bias"""),
("""head.weight""", """classifier.weight"""),
("""head.bias""", """classifier.bias"""),
] )
return rename_keys
def read_in_q_k_v(state_dict, config, has_lm_head=False, is_semantic=False):
    for i in range(config.num_hidden_layers):
        prefix = "backbone." if is_semantic else ""
        # queries, keys and values
        in_proj_weight = state_dict.pop(f"{prefix}blocks.{i}.attn.qkv.weight")
        q_bias = state_dict.pop(f"{prefix}blocks.{i}.attn.q_bias")
        v_bias = state_dict.pop(f"{prefix}blocks.{i}.attn.v_bias")

        state_dict[f"beit.encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[
            : config.hidden_size, :
        ]
        state_dict[f"beit.encoder.layer.{i}.attention.attention.query.bias"] = q_bias
        state_dict[f"beit.encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[f"beit.encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[f"beit.encoder.layer.{i}.attention.attention.value.bias"] = v_bias

        # gamma_1 and gamma_2
        # we call them lambda because otherwise they are renamed when using .from_pretrained
        gamma_1 = state_dict.pop(f"{prefix}blocks.{i}.gamma_1")
        gamma_2 = state_dict.pop(f"{prefix}blocks.{i}.gamma_2")
        state_dict[f"beit.encoder.layer.{i}.lambda_1"] = gamma_1
        state_dict[f"beit.encoder.layer.{i}.lambda_2"] = gamma_2
def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_dit_checkpoint(checkpoint_url, pytorch_dump_folder_path, push_to_hub=False):
    """
    Copy/paste/tweak the original DiT checkpoint weights into our BEiT structure.
    """
    # define default BEiT configuration
    has_lm_head = False if "rvlcdip" in checkpoint_url else True
    config = BeitConfig(use_absolute_position_embeddings=True, use_mask_token=has_lm_head)

    # size of the architecture
    if "large" in checkpoint_url or "dit-l" in checkpoint_url:
        config.hidden_size = 1024
        config.intermediate_size = 4096
        config.num_hidden_layers = 24
        config.num_attention_heads = 16

    # labels
    if "rvlcdip" in checkpoint_url:
        config.num_labels = 16
        repo_id = "huggingface/label-files"
        filename = "rvlcdip-id2label.json"
        id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
        id2label = {int(k): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}

    # load state_dict of original model, remove and rename some keys
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")["model"]

    rename_keys = create_rename_keys(config, has_lm_head=has_lm_head)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_q_k_v(state_dict, config, has_lm_head=has_lm_head)

    # load HuggingFace model
    model = BeitForMaskedImageModeling(config) if has_lm_head else BeitForImageClassification(config)
    model.eval()
    model.load_state_dict(state_dict)

    # Check outputs on an image
    image_processor = BeitImageProcessor(
        size=config.image_size, resample=PILImageResampling.BILINEAR, do_center_crop=False
    )
    image = prepare_img()

    encoding = image_processor(images=image, return_tensors="pt")
    pixel_values = encoding["pixel_values"]

    outputs = model(pixel_values)
    logits = outputs.logits

    # verify logits
    expected_shape = [1, 16] if "rvlcdip" in checkpoint_url else [1, 196, 8192]
    assert logits.shape == torch.Size(expected_shape), "Shape of logits not as expected"

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f"Saving model to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        if has_lm_head:
            model_name = "dit-base" if "base" in checkpoint_url else "dit-large"
        else:
            model_name = "dit-base-finetuned-rvlcdip" if "dit-b" in checkpoint_url else "dit-large-finetuned-rvlcdip"
        image_processor.push_to_hub(
            repo_path_or_name=Path(pytorch_dump_folder_path, model_name),
            organization="nielsr",
            commit_message="Add image processor",
            use_temp_dir=True,
        )
        model.push_to_hub(
            repo_path_or_name=Path(pytorch_dump_folder_path, model_name),
            organization="nielsr",
            commit_message="Add model",
            use_temp_dir=True,
        )
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument(
'--checkpoint_url',
default='https://layoutlm.blob.core.windows.net/dit/dit-pts/dit-base-224-p16-500k-62d53a.pth',
type=str,
help='URL to the original PyTorch checkpoint (.pth file).',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the folder to output PyTorch model.'
)
parser.add_argument(
'--push_to_hub',
action='store_true',
)
args = parser.parse_args()
convert_dit_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub)
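A hypothetical command-line invocation of the script above (the script file name and output path are illustrative; the URL is the parser's default):

python convert_dit_checkpoint.py --checkpoint_url https://layoutlm.blob.core.windows.net/dit/dit-pts/dit-base-224-p16-500k-62d53a.pth --pytorch_dump_folder_path ./dit-base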
| 321 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
'configuration_timesformer': ['TIMESFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP', 'TimesformerConfig'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_timesformer"] = [
'TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
'TimesformerModel',
'TimesformerForVideoClassification',
'TimesformerPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_timesformer import TIMESFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, TimesformerConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_timesformer import (
TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TimesformerForVideoClassification,
TimesformerModel,
TimesformerPreTrainedModel,
)
else:
import sys
sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 321 | 1 |
"""simple docstring"""
import itertools
from dataclasses import dataclass
from typing import Any, Callable, Dict, List, Optional, Union
import pandas as pd
import pyarrow as pa
import datasets
import datasets.config
from datasets.features.features import require_storage_cast
from datasets.table import table_cast
from datasets.utils.py_utils import Literal
logger = datasets.utils.logging.get_logger(__name__)

_PANDAS_READ_CSV_NO_DEFAULT_PARAMETERS = ["names", "prefix"]
_PANDAS_READ_CSV_DEPRECATED_PARAMETERS = ["warn_bad_lines", "error_bad_lines", "mangle_dupe_cols"]
_PANDAS_READ_CSV_NEW_1_3_0_PARAMETERS = ["encoding_errors", "on_bad_lines"]
_PANDAS_READ_CSV_NEW_2_0_0_PARAMETERS = ["date_format"]
@dataclass
class CsvConfig(datasets.BuilderConfig):
    """BuilderConfig for CSV."""
__lowercase : str = ","
__lowercase : Optional[str] = None
__lowercase : Optional[Union[int, List[int], str]] = "infer"
__lowercase : Optional[List[str]] = None
__lowercase : Optional[List[str]] = None
__lowercase : Optional[Union[int, str, List[int], List[str]]] = None
__lowercase : Optional[Union[List[int], List[str]]] = None
__lowercase : Optional[str] = None
__lowercase : bool = True
__lowercase : Optional[Literal["c", "python", "pyarrow"]] = None
__lowercase : Dict[Union[int, str], Callable[[Any], Any]] = None
__lowercase : Optional[list] = None
__lowercase : Optional[list] = None
__lowercase : bool = False
__lowercase : Optional[Union[int, List[int]]] = None
__lowercase : Optional[int] = None
__lowercase : Optional[Union[str, List[str]]] = None
__lowercase : bool = True
__lowercase : bool = True
__lowercase : bool = False
__lowercase : bool = True
__lowercase : Optional[str] = None
__lowercase : str = "."
__lowercase : Optional[str] = None
__lowercase : str = '"'
__lowercase : int = 0
__lowercase : Optional[str] = None
__lowercase : Optional[str] = None
__lowercase : Optional[str] = None
__lowercase : Optional[str] = None
__lowercase : bool = True
__lowercase : bool = True
__lowercase : int = 0
__lowercase : bool = True
__lowercase : bool = False
__lowercase : Optional[str] = None
__lowercase : int = 10000
__lowercase : Optional[datasets.Features] = None
__lowercase : Optional[str] = "strict"
__lowercase : Literal["error", "warn", "skip"] = "error"
__lowercase : Optional[str] = None
def snake_case_ ( self):
if self.delimiter is not None:
__SCREAMING_SNAKE_CASE = self.delimiter
if self.column_names is not None:
__SCREAMING_SNAKE_CASE = self.column_names
@property
    def pd_read_csv_kwargs(self):
        pd_read_csv_kwargs = {
"""sep""": self.sep,
"""header""": self.header,
"""names""": self.names,
"""index_col""": self.index_col,
"""usecols""": self.usecols,
"""prefix""": self.prefix,
"""mangle_dupe_cols""": self.mangle_dupe_cols,
"""engine""": self.engine,
"""converters""": self.converters,
"""true_values""": self.true_values,
"""false_values""": self.false_values,
"""skipinitialspace""": self.skipinitialspace,
"""skiprows""": self.skiprows,
"""nrows""": self.nrows,
"""na_values""": self.na_values,
"""keep_default_na""": self.keep_default_na,
"""na_filter""": self.na_filter,
"""verbose""": self.verbose,
"""skip_blank_lines""": self.skip_blank_lines,
"""thousands""": self.thousands,
"""decimal""": self.decimal,
"""lineterminator""": self.lineterminator,
"""quotechar""": self.quotechar,
"""quoting""": self.quoting,
"""escapechar""": self.escapechar,
"""comment""": self.comment,
"""encoding""": self.encoding,
"""dialect""": self.dialect,
"""error_bad_lines""": self.error_bad_lines,
"""warn_bad_lines""": self.warn_bad_lines,
"""skipfooter""": self.skipfooter,
"""doublequote""": self.doublequote,
"""memory_map""": self.memory_map,
"""float_precision""": self.float_precision,
"""chunksize""": self.chunksize,
"""encoding_errors""": self.encoding_errors,
"""on_bad_lines""": self.on_bad_lines,
"""date_format""": self.date_format,
}
# some kwargs must not be passed if they don't have a default value
# some others are deprecated and we can also not pass them if they are the default value
for pd_read_csv_parameter in _PANDAS_READ_CSV_NO_DEFAULT_PARAMETERS + _PANDAS_READ_CSV_DEPRECATED_PARAMETERS:
            if pd_read_csv_kwargs[pd_read_csv_parameter] == getattr(CsvConfig() , pd_read_csv_parameter):
del pd_read_csv_kwargs[pd_read_csv_parameter]
# Remove 2.0 new arguments
if not (datasets.config.PANDAS_VERSION.major >= 2):
for pd_read_csv_parameter in _PANDAS_READ_CSV_NEW_2_0_0_PARAMETERS:
del pd_read_csv_kwargs[pd_read_csv_parameter]
# Remove 1.3 new arguments
if not (datasets.config.PANDAS_VERSION.major >= 1 and datasets.config.PANDAS_VERSION.minor >= 3):
for pd_read_csv_parameter in _PANDAS_READ_CSV_NEW_1_3_0_PARAMETERS:
del pd_read_csv_kwargs[pd_read_csv_parameter]
return pd_read_csv_kwargs
class Csv ( datasets.ArrowBasedBuilder ):
    """simple docstring"""

    BUILDER_CONFIG_CLASS = CsvConfig

    def _info(self):
        return datasets.DatasetInfo(features=self.config.features)
    def _split_generators(self, dl_manager):
        if not self.config.data_files:
            raise ValueError(f"At least one data file must be specified, but got data_files={self.config.data_files}")
        data_files = dl_manager.download_and_extract(self.config.data_files)
        if isinstance(data_files , (str, list, tuple)):
            files = data_files
            if isinstance(files , str):
                files = [files]
            files = [dl_manager.iter_files(file) for file in files]
            return [datasets.SplitGenerator(name=datasets.Split.TRAIN , gen_kwargs={"""files""": files})]
        splits = []
        for split_name, files in data_files.items():
            if isinstance(files , str):
                files = [files]
            files = [dl_manager.iter_files(file) for file in files]
            splits.append(datasets.SplitGenerator(name=split_name , gen_kwargs={"""files""": files}))
        return splits
    def _cast_table(self, pa_table):
        if self.config.features is not None:
            schema = self.config.features.arrow_schema
            if all(not require_storage_cast(feature) for feature in self.config.features.values()):
                # cheaper cast
                pa_table = pa.Table.from_arrays([pa_table[field.name] for field in schema] , schema=schema)
            else:
                # more expensive cast; allows str <-> int/float or str to Audio for example
                pa_table = table_cast(pa_table , schema)
        return pa_table
    def _generate_tables(self, files):
        schema = self.config.features.arrow_schema if self.config.features else None
        # dtype allows reading an int column as str
        dtype = (
            {
                name: dtype.to_pandas_dtype() if not require_storage_cast(feature) else object
                for name, dtype, feature in zip(schema.names , schema.types , self.config.features.values())
            }
            if schema is not None
            else None
        )
        for file_idx, file in enumerate(itertools.chain.from_iterable(files)):
            csv_file_reader = pd.read_csv(file , iterator=True , dtype=dtype , **self.config.pd_read_csv_kwargs)
            try:
                for batch_idx, df in enumerate(csv_file_reader):
                    pa_table = pa.Table.from_pandas(df)
                    # Uncomment for debugging (will print the Arrow table size and elements)
                    # logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}")
                    # logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows)))
                    yield (file_idx, batch_idx), self._cast_table(pa_table)
            except ValueError as e:
                logger.error(f"Failed to read file '{file}' with error {type(e)}: {e}")
                raise
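# Hedged usage example: this builder is what the public API dispatches to for
# CSV files. The data file name and separator below are made up for illustration.
if __name__ == "__main__":
    from datasets import load_dataset

    ds = load_dataset("csv", data_files={"train": "train.csv"}, sep=";")
    print(ds["train"][0])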
| 366 |
"""simple docstring"""
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import Transformer2DModel, VQDiffusionPipeline, VQDiffusionScheduler, VQModel
from diffusers.pipelines.vq_diffusion.pipeline_vq_diffusion import LearnedClassifierFreeSamplingEmbeddings
from diffusers.utils import load_numpy, slow, torch_device
from diffusers.utils.testing_utils import require_torch_gpu
torch.backends.cuda.matmul.allow_tf32 = False
class SCREAMING_SNAKE_CASE_ ( unittest.TestCase ):
"""simple docstring"""
def snake_case_ ( self):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
    @property
    def num_embed(self):
        return 1_2

    @property
    def num_embeds_ada_norm(self):
        return 1_2

    @property
    def text_embedder_hidden_size(self):
        return 3_2
    @property
    def dummy_vqvae(self):
        torch.manual_seed(0)
        model = VQModel(
            block_out_channels=[3_2, 6_4] , in_channels=3 , out_channels=3 , down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D"""] , up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D"""] , latent_channels=3 , num_vq_embeddings=self.num_embed , vq_embed_dim=3 , )
        return model

    @property
    def dummy_tokenizer(self):
        tokenizer = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""")
        return tokenizer

    @property
    def dummy_text_encoder(self):
        torch.manual_seed(0)
        config = CLIPTextConfig(
            bos_token_id=0 , eos_token_id=2 , hidden_size=self.text_embedder_hidden_size , intermediate_size=3_7 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_0_0_0 , )
        return CLIPTextModel(config)

    @property
    def dummy_transformer(self):
        torch.manual_seed(0)
        height = 1_2
        width = 1_2
        model_kwargs = {
            """attention_bias""": True,
            """cross_attention_dim""": 3_2,
            """attention_head_dim""": height * width,
            """num_attention_heads""": 1,
            """num_vector_embeds""": self.num_embed,
            """num_embeds_ada_norm""": self.num_embeds_ada_norm,
            """norm_num_groups""": 3_2,
            """sample_size""": width,
            """activation_fn""": """geglu-approximate""",
        }
        model = Transformer2DModel(**model_kwargs)
        return model
    def test_vq_diffusion(self):
        device = """cpu"""
        vqvae = self.dummy_vqvae
        text_encoder = self.dummy_text_encoder
        tokenizer = self.dummy_tokenizer
        transformer = self.dummy_transformer
        scheduler = VQDiffusionScheduler(self.num_embed)
        learned_classifier_free_sampling_embeddings = LearnedClassifierFreeSamplingEmbeddings(learnable=False)
        pipe = VQDiffusionPipeline(
            vqvae=vqvae , text_encoder=text_encoder , tokenizer=tokenizer , transformer=transformer , scheduler=scheduler , learned_classifier_free_sampling_embeddings=learned_classifier_free_sampling_embeddings , )
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)
        prompt = """teddy bear playing in the pool"""
        generator = torch.Generator(device=device).manual_seed(0)
        output = pipe([prompt] , generator=generator , num_inference_steps=2 , output_type="""np""")
        image = output.images
        generator = torch.Generator(device=device).manual_seed(0)
        image_from_tuple = pipe(
            [prompt] , generator=generator , output_type="""np""" , return_dict=False , num_inference_steps=2)[0]
        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]
        assert image.shape == (1, 2_4, 2_4, 3)
        expected_slice = np.array([0.65_51, 0.61_68, 0.50_08, 0.56_76, 0.56_59, 0.42_95, 0.60_73, 0.55_99, 0.49_92])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1E-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1E-2
    def test_vq_diffusion_classifier_free_sampling(self):
        device = """cpu"""
        vqvae = self.dummy_vqvae
        text_encoder = self.dummy_text_encoder
        tokenizer = self.dummy_tokenizer
        transformer = self.dummy_transformer
        scheduler = VQDiffusionScheduler(self.num_embed)
        learned_classifier_free_sampling_embeddings = LearnedClassifierFreeSamplingEmbeddings(
            learnable=True , hidden_size=self.text_embedder_hidden_size , length=tokenizer.model_max_length)
        pipe = VQDiffusionPipeline(
            vqvae=vqvae , text_encoder=text_encoder , tokenizer=tokenizer , transformer=transformer , scheduler=scheduler , learned_classifier_free_sampling_embeddings=learned_classifier_free_sampling_embeddings , )
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)
        prompt = """teddy bear playing in the pool"""
        generator = torch.Generator(device=device).manual_seed(0)
        output = pipe([prompt] , generator=generator , num_inference_steps=2 , output_type="""np""")
        image = output.images
        generator = torch.Generator(device=device).manual_seed(0)
        image_from_tuple = pipe(
            [prompt] , generator=generator , output_type="""np""" , return_dict=False , num_inference_steps=2)[0]
        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]
        assert image.shape == (1, 2_4, 2_4, 3)
        expected_slice = np.array([0.66_93, 0.60_75, 0.49_59, 0.57_01, 0.55_83, 0.43_33, 0.61_71, 0.56_84, 0.49_88])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 2.0
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1E-2
@slow
@require_torch_gpu
class SCREAMING_SNAKE_CASE_ ( unittest.TestCase ):
"""simple docstring"""
def snake_case_ ( self):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
    def test_vq_diffusion_classifier_free_sampling(self):
        expected_image = load_numpy(
            """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
            """/vq_diffusion/teddy_bear_pool_classifier_free_sampling.npy""")
        pipeline = VQDiffusionPipeline.from_pretrained("""microsoft/vq-diffusion-ithq""")
        pipeline = pipeline.to(torch_device)
        pipeline.set_progress_bar_config(disable=None)
        # requires GPU generator for gumbel softmax
        # don't use GPU generator in tests though
        generator = torch.Generator(device=torch_device).manual_seed(0)
        output = pipeline(
            """teddy bear playing in the pool""" , num_images_per_prompt=1 , generator=generator , output_type="""np""" , )
        image = output.images[0]
        assert image.shape == (2_5_6, 2_5_6, 3)
        assert np.abs(expected_image - image).max() < 2.0
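# Stand-alone sketch of what the slow test above exercises (requires a GPU and
# downloads the real checkpoint; generation settings are illustrative).
if __name__ == "__main__":
    pipe = VQDiffusionPipeline.from_pretrained("microsoft/vq-diffusion-ithq").to("cuda")
    gen = torch.Generator(device="cuda").manual_seed(0)
    image = pipe("teddy bear playing in the pool", generator=gen).images[0]
    image.save("teddy_bear.png")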
| 255 | 0 |
from ..utils import DummyObject, requires_backends
class A__ ( metaclass=__snake_case ):
_UpperCAmelCase :Tuple = ['torch']
def __init__( self , *A_ , **A_ ):
'''simple docstring'''
requires_backends(self , ["torch"] )
@classmethod
def __UpperCamelCase( cls , *A_ , **A_ ):
'''simple docstring'''
requires_backends(cls , ["torch"] )
@classmethod
def __UpperCamelCase( cls , *A_ , **A_ ):
'''simple docstring'''
requires_backends(cls , ["torch"] )
class A__ ( metaclass=__snake_case ):
_UpperCAmelCase :Union[str, Any] = ['torch']
def __init__( self , *A_ , **A_ ):
'''simple docstring'''
requires_backends(self , ["torch"] )
@classmethod
def __UpperCamelCase( cls , *A_ , **A_ ):
'''simple docstring'''
requires_backends(cls , ["torch"] )
@classmethod
def __UpperCamelCase( cls , *A_ , **A_ ):
'''simple docstring'''
requires_backends(cls , ["torch"] )
class A__ ( metaclass=__snake_case ):
_UpperCAmelCase :Union[str, Any] = ['torch']
def __init__( self , *A_ , **A_ ):
'''simple docstring'''
requires_backends(self , ["torch"] )
@classmethod
def __UpperCamelCase( cls , *A_ , **A_ ):
'''simple docstring'''
requires_backends(cls , ["torch"] )
@classmethod
def __UpperCamelCase( cls , *A_ , **A_ ):
'''simple docstring'''
requires_backends(cls , ["torch"] )
class A__ ( metaclass=__snake_case ):
_UpperCAmelCase :Optional[int] = ['torch']
def __init__( self , *A_ , **A_ ):
'''simple docstring'''
requires_backends(self , ["torch"] )
@classmethod
def __UpperCamelCase( cls , *A_ , **A_ ):
'''simple docstring'''
requires_backends(cls , ["torch"] )
@classmethod
def __UpperCamelCase( cls , *A_ , **A_ ):
'''simple docstring'''
requires_backends(cls , ["torch"] )
class A__ ( metaclass=__snake_case ):
_UpperCAmelCase :str = ['torch']
def __init__( self , *A_ , **A_ ):
'''simple docstring'''
requires_backends(self , ["torch"] )
@classmethod
def __UpperCamelCase( cls , *A_ , **A_ ):
'''simple docstring'''
requires_backends(cls , ["torch"] )
@classmethod
def __UpperCamelCase( cls , *A_ , **A_ ):
'''simple docstring'''
requires_backends(cls , ["torch"] )
class A__ ( metaclass=__snake_case ):
_UpperCAmelCase :Dict = ['torch']
def __init__( self , *A_ , **A_ ):
'''simple docstring'''
requires_backends(self , ["torch"] )
@classmethod
def __UpperCamelCase( cls , *A_ , **A_ ):
'''simple docstring'''
requires_backends(cls , ["torch"] )
@classmethod
def __UpperCamelCase( cls , *A_ , **A_ ):
'''simple docstring'''
requires_backends(cls , ["torch"] )
class A__ ( metaclass=__snake_case ):
_UpperCAmelCase :int = ['torch']
def __init__( self , *A_ , **A_ ):
'''simple docstring'''
requires_backends(self , ["torch"] )
@classmethod
def __UpperCamelCase( cls , *A_ , **A_ ):
'''simple docstring'''
requires_backends(cls , ["torch"] )
@classmethod
def __UpperCamelCase( cls , *A_ , **A_ ):
'''simple docstring'''
requires_backends(cls , ["torch"] )
class A__ ( metaclass=__snake_case ):
_UpperCAmelCase :List[Any] = ['torch']
def __init__( self , *A_ , **A_ ):
'''simple docstring'''
requires_backends(self , ["torch"] )
@classmethod
def __UpperCamelCase( cls , *A_ , **A_ ):
'''simple docstring'''
requires_backends(cls , ["torch"] )
@classmethod
def __UpperCamelCase( cls , *A_ , **A_ ):
'''simple docstring'''
requires_backends(cls , ["torch"] )
class A__ ( metaclass=__snake_case ):
_UpperCAmelCase :List[Any] = ['torch']
def __init__( self , *A_ , **A_ ):
'''simple docstring'''
requires_backends(self , ["torch"] )
@classmethod
def __UpperCamelCase( cls , *A_ , **A_ ):
'''simple docstring'''
requires_backends(cls , ["torch"] )
@classmethod
def __UpperCamelCase( cls , *A_ , **A_ ):
'''simple docstring'''
requires_backends(cls , ["torch"] )
class A__ ( metaclass=__snake_case ):
_UpperCAmelCase :int = ['torch']
def __init__( self , *A_ , **A_ ):
'''simple docstring'''
requires_backends(self , ["torch"] )
@classmethod
def __UpperCamelCase( cls , *A_ , **A_ ):
'''simple docstring'''
requires_backends(cls , ["torch"] )
@classmethod
def __UpperCamelCase( cls , *A_ , **A_ ):
'''simple docstring'''
requires_backends(cls , ["torch"] )
class A__ ( metaclass=__snake_case ):
_UpperCAmelCase :List[str] = ['torch']
def __init__( self , *A_ , **A_ ):
'''simple docstring'''
requires_backends(self , ["torch"] )
@classmethod
def __UpperCamelCase( cls , *A_ , **A_ ):
'''simple docstring'''
requires_backends(cls , ["torch"] )
@classmethod
def __UpperCamelCase( cls , *A_ , **A_ ):
'''simple docstring'''
requires_backends(cls , ["torch"] )
def A_ ( *_lowerCAmelCase , **_lowerCAmelCase ) -> str:
requires_backends(_lowerCAmelCase , ["torch"] )
def A_ ( *_lowerCAmelCase , **_lowerCAmelCase ) -> Union[str, Any]:
requires_backends(_lowerCAmelCase , ["torch"] )
def A_ ( *_lowerCAmelCase , **_lowerCAmelCase ) -> Optional[int]:
requires_backends(_lowerCAmelCase , ["torch"] )
def A_ ( *_lowerCAmelCase , **_lowerCAmelCase ) -> Dict:
requires_backends(_lowerCAmelCase , ["torch"] )
def A_ ( *_lowerCAmelCase , **_lowerCAmelCase ) -> int:
requires_backends(_lowerCAmelCase , ["torch"] )
def A_ ( *_lowerCAmelCase , **_lowerCAmelCase ) -> List[str]:
requires_backends(_lowerCAmelCase , ["torch"] )
def A_ ( *_lowerCAmelCase , **_lowerCAmelCase ) -> Dict:
requires_backends(_lowerCAmelCase , ["torch"] )
class A__ ( metaclass=__snake_case ):
_UpperCAmelCase :Any = ['torch']
def __init__( self , *A_ , **A_ ):
'''simple docstring'''
requires_backends(self , ["torch"] )
@classmethod
def __UpperCamelCase( cls , *A_ , **A_ ):
'''simple docstring'''
requires_backends(cls , ["torch"] )
@classmethod
def __UpperCamelCase( cls , *A_ , **A_ ):
'''simple docstring'''
requires_backends(cls , ["torch"] )
class A__ ( metaclass=__snake_case ):
_UpperCAmelCase :List[str] = ['torch']
def __init__( self , *A_ , **A_ ):
'''simple docstring'''
requires_backends(self , ["torch"] )
@classmethod
def __UpperCamelCase( cls , *A_ , **A_ ):
'''simple docstring'''
requires_backends(cls , ["torch"] )
@classmethod
def __UpperCamelCase( cls , *A_ , **A_ ):
'''simple docstring'''
requires_backends(cls , ["torch"] )
class A__ ( metaclass=__snake_case ):
_UpperCAmelCase :List[Any] = ['torch']
def __init__( self , *A_ , **A_ ):
'''simple docstring'''
requires_backends(self , ["torch"] )
@classmethod
def __UpperCamelCase( cls , *A_ , **A_ ):
'''simple docstring'''
requires_backends(cls , ["torch"] )
@classmethod
def __UpperCamelCase( cls , *A_ , **A_ ):
'''simple docstring'''
requires_backends(cls , ["torch"] )
class A__ ( metaclass=__snake_case ):
_UpperCAmelCase :int = ['torch']
def __init__( self , *A_ , **A_ ):
'''simple docstring'''
requires_backends(self , ["torch"] )
@classmethod
def __UpperCamelCase( cls , *A_ , **A_ ):
'''simple docstring'''
requires_backends(cls , ["torch"] )
@classmethod
def __UpperCamelCase( cls , *A_ , **A_ ):
'''simple docstring'''
requires_backends(cls , ["torch"] )
class A__ ( metaclass=__snake_case ):
_UpperCAmelCase :Union[str, Any] = ['torch']
def __init__( self , *A_ , **A_ ):
'''simple docstring'''
requires_backends(self , ["torch"] )
@classmethod
def __UpperCamelCase( cls , *A_ , **A_ ):
'''simple docstring'''
requires_backends(cls , ["torch"] )
@classmethod
def __UpperCamelCase( cls , *A_ , **A_ ):
'''simple docstring'''
requires_backends(cls , ["torch"] )
class A__ ( metaclass=__snake_case ):
_UpperCAmelCase :str = ['torch']
def __init__( self , *A_ , **A_ ):
'''simple docstring'''
requires_backends(self , ["torch"] )
@classmethod
def __UpperCamelCase( cls , *A_ , **A_ ):
'''simple docstring'''
requires_backends(cls , ["torch"] )
@classmethod
def __UpperCamelCase( cls , *A_ , **A_ ):
'''simple docstring'''
requires_backends(cls , ["torch"] )
class A__ ( metaclass=__snake_case ):
_UpperCAmelCase :int = ['torch']
def __init__( self , *A_ , **A_ ):
'''simple docstring'''
requires_backends(self , ["torch"] )
@classmethod
def __UpperCamelCase( cls , *A_ , **A_ ):
'''simple docstring'''
requires_backends(cls , ["torch"] )
@classmethod
def __UpperCamelCase( cls , *A_ , **A_ ):
'''simple docstring'''
requires_backends(cls , ["torch"] )
class A__ ( metaclass=__snake_case ):
_UpperCAmelCase :List[Any] = ['torch']
def __init__( self , *A_ , **A_ ):
'''simple docstring'''
requires_backends(self , ["torch"] )
@classmethod
def __UpperCamelCase( cls , *A_ , **A_ ):
'''simple docstring'''
requires_backends(cls , ["torch"] )
@classmethod
def __UpperCamelCase( cls , *A_ , **A_ ):
'''simple docstring'''
requires_backends(cls , ["torch"] )
class A__ ( metaclass=__snake_case ):
_UpperCAmelCase :Dict = ['torch']
def __init__( self , *A_ , **A_ ):
'''simple docstring'''
requires_backends(self , ["torch"] )
@classmethod
def __UpperCamelCase( cls , *A_ , **A_ ):
'''simple docstring'''
requires_backends(cls , ["torch"] )
@classmethod
def __UpperCamelCase( cls , *A_ , **A_ ):
'''simple docstring'''
requires_backends(cls , ["torch"] )
class A__ ( metaclass=__snake_case ):
_UpperCAmelCase :Optional[int] = ['torch']
def __init__( self , *A_ , **A_ ):
'''simple docstring'''
requires_backends(self , ["torch"] )
@classmethod
def __UpperCamelCase( cls , *A_ , **A_ ):
'''simple docstring'''
requires_backends(cls , ["torch"] )
@classmethod
def __UpperCamelCase( cls , *A_ , **A_ ):
'''simple docstring'''
requires_backends(cls , ["torch"] )
class A__ ( metaclass=__snake_case ):
_UpperCAmelCase :Union[str, Any] = ['torch']
def __init__( self , *A_ , **A_ ):
'''simple docstring'''
requires_backends(self , ["torch"] )
@classmethod
def __UpperCamelCase( cls , *A_ , **A_ ):
'''simple docstring'''
requires_backends(cls , ["torch"] )
@classmethod
def __UpperCamelCase( cls , *A_ , **A_ ):
'''simple docstring'''
requires_backends(cls , ["torch"] )
class A__ ( metaclass=__snake_case ):
_UpperCAmelCase :int = ['torch']
def __init__( self , *A_ , **A_ ):
'''simple docstring'''
requires_backends(self , ["torch"] )
@classmethod
def __UpperCamelCase( cls , *A_ , **A_ ):
'''simple docstring'''
requires_backends(cls , ["torch"] )
@classmethod
def __UpperCamelCase( cls , *A_ , **A_ ):
'''simple docstring'''
requires_backends(cls , ["torch"] )
class A__ ( metaclass=__snake_case ):
_UpperCAmelCase :Dict = ['torch']
def __init__( self , *A_ , **A_ ):
'''simple docstring'''
requires_backends(self , ["torch"] )
@classmethod
def __UpperCamelCase( cls , *A_ , **A_ ):
'''simple docstring'''
requires_backends(cls , ["torch"] )
@classmethod
def __UpperCamelCase( cls , *A_ , **A_ ):
'''simple docstring'''
requires_backends(cls , ["torch"] )
class A__ ( metaclass=__snake_case ):
_UpperCAmelCase :Optional[Any] = ['torch']
def __init__( self , *A_ , **A_ ):
'''simple docstring'''
requires_backends(self , ["torch"] )
@classmethod
def __UpperCamelCase( cls , *A_ , **A_ ):
'''simple docstring'''
requires_backends(cls , ["torch"] )
@classmethod
def __UpperCamelCase( cls , *A_ , **A_ ):
'''simple docstring'''
requires_backends(cls , ["torch"] )
class A__ ( metaclass=__snake_case ):
_UpperCAmelCase :List[str] = ['torch']
def __init__( self , *A_ , **A_ ):
'''simple docstring'''
requires_backends(self , ["torch"] )
@classmethod
def __UpperCamelCase( cls , *A_ , **A_ ):
'''simple docstring'''
requires_backends(cls , ["torch"] )
@classmethod
def __UpperCamelCase( cls , *A_ , **A_ ):
'''simple docstring'''
requires_backends(cls , ["torch"] )
class A__ ( metaclass=__snake_case ):
_UpperCAmelCase :Optional[int] = ['torch']
def __init__( self , *A_ , **A_ ):
'''simple docstring'''
requires_backends(self , ["torch"] )
@classmethod
def __UpperCamelCase( cls , *A_ , **A_ ):
'''simple docstring'''
requires_backends(cls , ["torch"] )
@classmethod
def __UpperCamelCase( cls , *A_ , **A_ ):
'''simple docstring'''
requires_backends(cls , ["torch"] )
class A__ ( metaclass=__snake_case ):
_UpperCAmelCase :str = ['torch']
def __init__( self , *A_ , **A_ ):
'''simple docstring'''
requires_backends(self , ["torch"] )
@classmethod
def __UpperCamelCase( cls , *A_ , **A_ ):
'''simple docstring'''
requires_backends(cls , ["torch"] )
@classmethod
def __UpperCamelCase( cls , *A_ , **A_ ):
'''simple docstring'''
requires_backends(cls , ["torch"] )
class A__ ( metaclass=__snake_case ):
_UpperCAmelCase :Dict = ['torch']
def __init__( self , *A_ , **A_ ):
'''simple docstring'''
requires_backends(self , ["torch"] )
@classmethod
def __UpperCamelCase( cls , *A_ , **A_ ):
'''simple docstring'''
requires_backends(cls , ["torch"] )
@classmethod
def __UpperCamelCase( cls , *A_ , **A_ ):
'''simple docstring'''
requires_backends(cls , ["torch"] )
class A__ ( metaclass=__snake_case ):
_UpperCAmelCase :int = ['torch']
def __init__( self , *A_ , **A_ ):
'''simple docstring'''
requires_backends(self , ["torch"] )
@classmethod
def __UpperCamelCase( cls , *A_ , **A_ ):
'''simple docstring'''
requires_backends(cls , ["torch"] )
@classmethod
def __UpperCamelCase( cls , *A_ , **A_ ):
'''simple docstring'''
requires_backends(cls , ["torch"] )
class A__ ( metaclass=__snake_case ):
_UpperCAmelCase :Optional[int] = ['torch']
def __init__( self , *A_ , **A_ ):
'''simple docstring'''
requires_backends(self , ["torch"] )
@classmethod
def __UpperCamelCase( cls , *A_ , **A_ ):
'''simple docstring'''
requires_backends(cls , ["torch"] )
@classmethod
def __UpperCamelCase( cls , *A_ , **A_ ):
'''simple docstring'''
requires_backends(cls , ["torch"] )
class A__ ( metaclass=__snake_case ):
_UpperCAmelCase :str = ['torch']
def __init__( self , *A_ , **A_ ):
'''simple docstring'''
requires_backends(self , ["torch"] )
@classmethod
def __UpperCamelCase( cls , *A_ , **A_ ):
'''simple docstring'''
requires_backends(cls , ["torch"] )
@classmethod
def __UpperCamelCase( cls , *A_ , **A_ ):
'''simple docstring'''
requires_backends(cls , ["torch"] )
class A__ ( metaclass=__snake_case ):
_UpperCAmelCase :Union[str, Any] = ['torch']
def __init__( self , *A_ , **A_ ):
'''simple docstring'''
requires_backends(self , ["torch"] )
@classmethod
def __UpperCamelCase( cls , *A_ , **A_ ):
'''simple docstring'''
requires_backends(cls , ["torch"] )
@classmethod
def __UpperCamelCase( cls , *A_ , **A_ ):
'''simple docstring'''
requires_backends(cls , ["torch"] )
class A__ ( metaclass=__snake_case ):
_UpperCAmelCase :Any = ['torch']
def __init__( self , *A_ , **A_ ):
'''simple docstring'''
requires_backends(self , ["torch"] )
@classmethod
def __UpperCamelCase( cls , *A_ , **A_ ):
'''simple docstring'''
requires_backends(cls , ["torch"] )
@classmethod
def __UpperCamelCase( cls , *A_ , **A_ ):
'''simple docstring'''
requires_backends(cls , ["torch"] )
class A__ ( metaclass=__snake_case ):
_UpperCAmelCase :str = ['torch']
def __init__( self , *A_ , **A_ ):
'''simple docstring'''
requires_backends(self , ["torch"] )
@classmethod
def __UpperCamelCase( cls , *A_ , **A_ ):
'''simple docstring'''
requires_backends(cls , ["torch"] )
@classmethod
def __UpperCamelCase( cls , *A_ , **A_ ):
'''simple docstring'''
requires_backends(cls , ["torch"] )
class A__ ( metaclass=__snake_case ):
_UpperCAmelCase :str = ['torch']
def __init__( self , *A_ , **A_ ):
'''simple docstring'''
requires_backends(self , ["torch"] )
@classmethod
def __UpperCamelCase( cls , *A_ , **A_ ):
'''simple docstring'''
requires_backends(cls , ["torch"] )
@classmethod
def __UpperCamelCase( cls , *A_ , **A_ ):
'''simple docstring'''
requires_backends(cls , ["torch"] )
class A__ ( metaclass=__snake_case ):
_UpperCAmelCase :List[str] = ['torch']
def __init__( self , *A_ , **A_ ):
'''simple docstring'''
requires_backends(self , ["torch"] )
@classmethod
def __UpperCamelCase( cls , *A_ , **A_ ):
'''simple docstring'''
requires_backends(cls , ["torch"] )
@classmethod
def __UpperCamelCase( cls , *A_ , **A_ ):
'''simple docstring'''
requires_backends(cls , ["torch"] )
class A__ ( metaclass=__snake_case ):
_UpperCAmelCase :str = ['torch']
def __init__( self , *A_ , **A_ ):
'''simple docstring'''
requires_backends(self , ["torch"] )
@classmethod
def __UpperCamelCase( cls , *A_ , **A_ ):
'''simple docstring'''
requires_backends(cls , ["torch"] )
@classmethod
def __UpperCamelCase( cls , *A_ , **A_ ):
'''simple docstring'''
requires_backends(cls , ["torch"] )
class A__ ( metaclass=__snake_case ):
_UpperCAmelCase :Any = ['torch']
def __init__( self , *A_ , **A_ ):
'''simple docstring'''
requires_backends(self , ["torch"] )
@classmethod
def __UpperCamelCase( cls , *A_ , **A_ ):
'''simple docstring'''
requires_backends(cls , ["torch"] )
@classmethod
def __UpperCamelCase( cls , *A_ , **A_ ):
'''simple docstring'''
requires_backends(cls , ["torch"] )
class A__ ( metaclass=__snake_case ):
_UpperCAmelCase :str = ['torch']
def __init__( self , *A_ , **A_ ):
'''simple docstring'''
requires_backends(self , ["torch"] )
@classmethod
def __UpperCamelCase( cls , *A_ , **A_ ):
'''simple docstring'''
requires_backends(cls , ["torch"] )
@classmethod
def __UpperCamelCase( cls , *A_ , **A_ ):
'''simple docstring'''
requires_backends(cls , ["torch"] )
class A__ ( metaclass=__snake_case ):
_UpperCAmelCase :Union[str, Any] = ['torch']
def __init__( self , *A_ , **A_ ):
'''simple docstring'''
requires_backends(self , ["torch"] )
@classmethod
def __UpperCamelCase( cls , *A_ , **A_ ):
'''simple docstring'''
requires_backends(cls , ["torch"] )
@classmethod
def __UpperCamelCase( cls , *A_ , **A_ ):
'''simple docstring'''
requires_backends(cls , ["torch"] )
class A__ ( metaclass=__snake_case ):
_UpperCAmelCase :Tuple = ['torch']
def __init__( self , *A_ , **A_ ):
'''simple docstring'''
requires_backends(self , ["torch"] )
@classmethod
def __UpperCamelCase( cls , *A_ , **A_ ):
'''simple docstring'''
requires_backends(cls , ["torch"] )
@classmethod
def __UpperCamelCase( cls , *A_ , **A_ ):
'''simple docstring'''
requires_backends(cls , ["torch"] )
class A__ ( metaclass=__snake_case ):
_UpperCAmelCase :Any = ['torch']
def __init__( self , *A_ , **A_ ):
'''simple docstring'''
requires_backends(self , ["torch"] )
@classmethod
def __UpperCamelCase( cls , *A_ , **A_ ):
'''simple docstring'''
requires_backends(cls , ["torch"] )
@classmethod
def __UpperCamelCase( cls , *A_ , **A_ ):
'''simple docstring'''
requires_backends(cls , ["torch"] )
class A__ ( metaclass=__snake_case ):
_UpperCAmelCase :Optional[Any] = ['torch']
def __init__( self , *A_ , **A_ ):
'''simple docstring'''
requires_backends(self , ["torch"] )
@classmethod
def __UpperCamelCase( cls , *A_ , **A_ ):
'''simple docstring'''
requires_backends(cls , ["torch"] )
@classmethod
def __UpperCamelCase( cls , *A_ , **A_ ):
'''simple docstring'''
requires_backends(cls , ["torch"] )
class A__ ( metaclass=__snake_case ):
_UpperCAmelCase :Union[str, Any] = ['torch']
def __init__( self , *A_ , **A_ ):
'''simple docstring'''
requires_backends(self , ["torch"] )
@classmethod
def __UpperCamelCase( cls , *A_ , **A_ ):
'''simple docstring'''
requires_backends(cls , ["torch"] )
@classmethod
def __UpperCamelCase( cls , *A_ , **A_ ):
'''simple docstring'''
requires_backends(cls , ["torch"] )
class A__ ( metaclass=__snake_case ):
_UpperCAmelCase :List[Any] = ['torch']
def __init__( self , *A_ , **A_ ):
'''simple docstring'''
requires_backends(self , ["torch"] )
@classmethod
def __UpperCamelCase( cls , *A_ , **A_ ):
'''simple docstring'''
requires_backends(cls , ["torch"] )
@classmethod
def __UpperCamelCase( cls , *A_ , **A_ ):
'''simple docstring'''
requires_backends(cls , ["torch"] )
class A__ ( metaclass=__snake_case ):
_UpperCAmelCase :Dict = ['torch']
def __init__( self , *A_ , **A_ ):
'''simple docstring'''
requires_backends(self , ["torch"] )
@classmethod
def __UpperCamelCase( cls , *A_ , **A_ ):
'''simple docstring'''
requires_backends(cls , ["torch"] )
@classmethod
def __UpperCamelCase( cls , *A_ , **A_ ):
'''simple docstring'''
requires_backends(cls , ["torch"] )
class A__ ( metaclass=__snake_case ):
_UpperCAmelCase :Dict = ['torch']
def __init__( self , *A_ , **A_ ):
'''simple docstring'''
requires_backends(self , ["torch"] )
@classmethod
def __UpperCamelCase( cls , *A_ , **A_ ):
'''simple docstring'''
requires_backends(cls , ["torch"] )
@classmethod
def __UpperCamelCase( cls , *A_ , **A_ ):
'''simple docstring'''
requires_backends(cls , ["torch"] )
class A__ ( metaclass=__snake_case ):
_UpperCAmelCase :Tuple = ['torch']
def __init__( self , *A_ , **A_ ):
'''simple docstring'''
requires_backends(self , ["torch"] )
@classmethod
def __UpperCamelCase( cls , *A_ , **A_ ):
'''simple docstring'''
requires_backends(cls , ["torch"] )
@classmethod
def __UpperCamelCase( cls , *A_ , **A_ ):
'''simple docstring'''
requires_backends(cls , ["torch"] )
class A__ ( metaclass=__snake_case ):
_UpperCAmelCase :Optional[Any] = ['torch']
def __init__( self , *A_ , **A_ ):
'''simple docstring'''
requires_backends(self , ["torch"] )
@classmethod
def __UpperCamelCase( cls , *A_ , **A_ ):
'''simple docstring'''
requires_backends(cls , ["torch"] )
@classmethod
def __UpperCamelCase( cls , *A_ , **A_ ):
'''simple docstring'''
requires_backends(cls , ["torch"] )
| 52 | from typing import List, Optional, TypeVar
from .arrow_dataset import Dataset, _concatenate_map_style_datasets, _interleave_map_style_datasets
from .dataset_dict import DatasetDict, IterableDatasetDict
from .info import DatasetInfo
from .iterable_dataset import IterableDataset, _concatenate_iterable_datasets, _interleave_iterable_datasets
from .splits import NamedSplit
from .utils import logging
from .utils.py_utils import Literal
logger = logging.get_logger(__name__)

DatasetType = TypeVar("""DatasetType""", Dataset, IterableDataset)
def interleave_datasets(datasets , probabilities = None , seed = None , info = None , split = None , stopping_strategy = "first_exhausted" , ):
    """simple docstring"""
    from .arrow_dataset import Dataset
    from .iterable_dataset import IterableDataset

    if not datasets:
        raise ValueError('''Unable to interleave an empty list of datasets.''' )
    for i, dataset in enumerate(datasets ):
        if not isinstance(dataset , (Dataset, IterableDataset) ):
            if isinstance(dataset , (DatasetDict, IterableDatasetDict) ):
                if not dataset:
                    raise ValueError(
                        F"Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} "
                        '''is an empty dataset dictionary.''' )
                raise ValueError(
                    F"Dataset at position {i} has at least one split: {list(dataset )}\n"
                    F"Please pick one to interleave with the other datasets, for example: dataset['{next(iter(dataset ) )}']" )
            raise ValueError(
                F"Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} is a {type(dataset ).__name__}." )
        if i == 0:
            dataset_type , other_type = (
                (Dataset, IterableDataset) if isinstance(dataset , Dataset ) else (IterableDataset, Dataset)
            )
        elif not isinstance(dataset , dataset_type ):
            raise ValueError(
                F"Unable to interleave a {dataset_type.__name__} (at position 0) with a {other_type.__name__} (at position {i}). Expected a list of Dataset objects or a list of IterableDataset objects." )
    if stopping_strategy not in ["first_exhausted", "all_exhausted"]:
        raise ValueError(F"{stopping_strategy} is not supported. Please enter a valid stopping_strategy." )
    if dataset_type is Dataset:
        return _interleave_map_style_datasets(
            datasets , probabilities , seed , info=info , split=split , stopping_strategy=stopping_strategy )
    else:
        return _interleave_iterable_datasets(
            datasets , probabilities , seed , info=info , split=split , stopping_strategy=stopping_strategy )
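# Illustrative call into the function above (toy column name "a" is made up):
#
#     d1 = Dataset.from_dict({"a": [0, 1, 2]})
#     d2 = Dataset.from_dict({"a": [10, 11, 12]})
#     mixed = interleave_datasets([d1, d2], probabilities=[0.5, 0.5], seed=42,
#                                 stopping_strategy="all_exhausted")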
def concatenate_datasets(dsets , info = None , split = None , axis = 0 , ):
    """simple docstring"""
    if not dsets:
        raise ValueError('''Unable to concatenate an empty list of datasets.''' )
    for i, dataset in enumerate(dsets ):
        if not isinstance(dataset , (Dataset, IterableDataset) ):
            if isinstance(dataset , (DatasetDict, IterableDatasetDict) ):
                if not dataset:
                    raise ValueError(
                        F"Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} "
                        '''is an empty dataset dictionary.''' )
                raise ValueError(
                    F"Dataset at position {i} has at least one split: {list(dataset )}\n"
                    F"Please pick one to interleave with the other datasets, for example: dataset['{next(iter(dataset ) )}']" )
            raise ValueError(
                F"Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} is a {type(dataset ).__name__}." )
        if i == 0:
            dataset_type , other_type = (
                (Dataset, IterableDataset) if isinstance(dataset , Dataset ) else (IterableDataset, Dataset)
            )
        elif not isinstance(dataset , dataset_type ):
            raise ValueError(
                F"Unable to interleave a {dataset_type.__name__} (at position 0) with a {other_type.__name__} (at position {i}). Expected a list of Dataset objects or a list of IterableDataset objects." )
    if dataset_type is Dataset:
        return _concatenate_map_style_datasets(dsets , info=info , split=split , axis=axis )
    else:
        return _concatenate_iterable_datasets(dsets , info=info , split=split , axis=axis ) | 210 | 0 |
"""simple docstring"""
from typing import List, Optional, Union
import numpy as np
import tensorflow as tf
from .utils import logging
logger = logging.get_logger(__name__)
def shape_list(tensor):
    if isinstance(tensor , np.ndarray ):
        return list(tensor.shape )
    dynamic = tf.shape(tensor )
    if tensor.shape == tf.TensorShape(None ):
        return dynamic
    static = tensor.shape.as_list()
    return [dynamic[i] if s is None else s for i, s in enumerate(static )]


def stable_softmax(logits , axis = None , name = None ):
    return tf.nn.softmax(logits=logits + 1e-9 , axis=axis , name=name )
def functional_layernorm(inputs , weight , bias , epsilon=1e-5 , axis=-1 ):
    # This is a very simplified functional layernorm, designed to duplicate
    # the functionality of PyTorch nn.functional.layer_norm when this is needed to port
    # models in Transformers.
    if weight.shape.rank != 1 or bias.shape.rank != 1 or not isinstance(axis , int ):
        raise NotImplementedError("Only 1D weight and bias tensors are supported for now, with only a single axis." )
    # Get mean and variance on the axis to be normalized
    mean, variance = tf.nn.moments(inputs ,axes=[axis] ,keepdims=True )
    if axis != -1:
        # Reshape scale and weight to have the same rank as inputs, but with 1 dimensions
        # on every dimension except axis
        shape = [1] * inputs.shape.rank
        shape[axis] = shape_list(inputs )[axis]
        weight = tf.reshape(weight , shape )
        bias = tf.reshape(bias , shape )
    # Compute layer normalization using the batch_normalization
    # function.
    outputs = tf.nn.batch_normalization(
        inputs ,mean ,variance ,offset=bias ,scale=weight ,variance_epsilon=epsilon ,)
    return outputs
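# Quick sanity check of functional_layernorm against Keras' LayerNormalization
# (illustrative only; epsilon matched to the default above):
if __name__ == "__main__":
    _x = tf.random.normal([2, 5, 16])
    _out = functional_layernorm(_x, tf.ones([16]), tf.zeros([16]))
    _ref = tf.keras.layers.LayerNormalization(axis=-1, epsilon=1e-5)(_x)
    tf.debugging.assert_near(_out, _ref, atol=1e-4)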
def flatten(input , start_dim=0 , end_dim=-1 ):
    # Replicates the behavior of torch.flatten in TF
    # If end_dim or start_dim is negative, count them from the end
    if end_dim < 0:
        end_dim += input.shape.rank
    if start_dim < 0:
        start_dim += input.shape.rank
    if start_dim == end_dim:
        return input
    in_shape = tf.shape(input )
    flattened_dim = tf.math.reduce_prod(in_shape[start_dim : end_dim + 1] )
    out_shape = tf.concat([in_shape[:start_dim], [flattened_dim], in_shape[end_dim + 1 :]] ,axis=0 )
    return tf.reshape(input , out_shape )
def invert_attention_mask(encoder_attention_mask ):
    if not isinstance(encoder_attention_mask , tf.Tensor ):
        encoder_attention_mask = tf.convert_to_tensor(encoder_attention_mask )  # Catches stray NumPy inputs
    if encoder_attention_mask.shape.rank == 3:
        encoder_extended_attention_mask = encoder_attention_mask[:, None, :, :]
    if encoder_attention_mask.shape.rank == 2:
        encoder_extended_attention_mask = encoder_attention_mask[:, None, None, :]
    # T5 has a mask that can compare sequence ids, we can simulate this here with this transposition
    # Cf. https://github.com/tensorflow/mesh/blob/8d2465e9bc93129b913b5ccc6a59aa97abd96ec6/mesh_tensorflow
    # /transformer/transformer_layers.py#L270
    # encoder_extended_attention_mask = (encoder_extended_attention_mask ==
    # encoder_extended_attention_mask.transpose(-1, -2))
    encoder_extended_attention_mask = (
        tf.cast(1 ,encoder_attention_mask.dtype ) - encoder_extended_attention_mask
    ) * encoder_extended_attention_mask.dtype.min
    return encoder_extended_attention_mask
def snake_case ( A__ ,A__ ,A__ = "input_ids" ):
tf.debugging.assert_less(
A__ ,tf.cast(A__ ,dtype=tensor.dtype ) ,message=(
F"""The maximum value of {tensor_name} ({tf.math.reduce_max(A__ )}) must be smaller than the embedding """
F"""layer's input dimension ({embed_dim}). The likely cause is some problem at tokenization time."""
) ,)
def save_attributes_to_hdf5_group(group , name , data ):
    HDF5_OBJECT_HEADER_LIMIT = 6_45_12
    # Check that no item in `data` is larger than `HDF5_OBJECT_HEADER_LIMIT`
    # because in that case even chunking the array would not make the saving
    # possible.
    bad_attributes = [x for x in data if len(x ) > HDF5_OBJECT_HEADER_LIMIT]
    # Expecting this to never be true.
    if bad_attributes:
        raise RuntimeError(
            "The following attributes cannot be saved to HDF5 file because "
            f"""they are larger than {HDF5_OBJECT_HEADER_LIMIT} """
            f"""bytes: {bad_attributes}""" )
    data_npy = np.asarray(data )
    num_chunks = 1
    chunked_data = np.array_split(data_npy ,num_chunks )
    # This will never loop forever thanks to the test above.
    while any(x.nbytes > HDF5_OBJECT_HEADER_LIMIT for x in chunked_data ):
        num_chunks += 1
        chunked_data = np.array_split(data_npy ,num_chunks )
    if num_chunks > 1:
        for chunk_id, chunk_data in enumerate(chunked_data ):
            group.attrs["%s%d" % (name, chunk_id)] = chunk_data
    else:
        group.attrs[name] = data
def load_attributes_from_hdf5_group(group , name ):
    if name in group.attrs:
        data = [n.decode("utf8" ) if hasattr(n ,"decode" ) else n for n in group.attrs[name]]
    else:
        data = []
        chunk_id = 0
        while "%s%d" % (name, chunk_id) in group.attrs:
            data.extend(
                [n.decode("utf8" ) if hasattr(n ,"decode" ) else n for n in group.attrs["%s%d" % (name, chunk_id)]] )
            chunk_id += 1
    return data
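# Hedged round-trip demo of the two HDF5 helpers above (requires h5py, which
# this module does not import; the file name is illustrative):
if __name__ == "__main__":
    import h5py

    with h5py.File("attrs_demo.h5", "w") as f:
        layer_group = f.create_group("layer")
        save_attributes_to_hdf5_group(layer_group, "weight_names", [b"kernel:0", b"bias:0"])
        assert load_attributes_from_hdf5_group(layer_group, "weight_names") == ["kernel:0", "bias:0"]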
def expand_1d(data ):
    def _expand_single_1d_tensor(t ):
        if isinstance(t ,tf.Tensor ) and t.shape.rank == 1:
            return tf.expand_dims(t ,axis=-1 )
        return t
    return tf.nest.map_structure(_expand_single_1d_tensor ,data )
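# Small demos of the torch-like helpers above (eager mode; shapes illustrative):
if __name__ == "__main__":
    _t = tf.zeros([2, 3, 4, 5])
    assert shape_list(_t) == [2, 3, 4, 5]
    assert flatten(_t, start_dim=1, end_dim=2).shape == (2, 12, 5)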
| 253 |
"""simple docstring"""
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, List, Mapping, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import TensorType, logging
if TYPE_CHECKING:
from ...onnx.config import PatchingSpec
from ...tokenization_utils_base import PreTrainedTokenizerBase
logger = logging.get_logger(__name__)
LONGFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'''allenai/longformer-base-4096''': '''https://huggingface.co/allenai/longformer-base-4096/resolve/main/config.json''',
'''allenai/longformer-large-4096''': '''https://huggingface.co/allenai/longformer-large-4096/resolve/main/config.json''',
'''allenai/longformer-large-4096-finetuned-triviaqa''': (
'''https://huggingface.co/allenai/longformer-large-4096-finetuned-triviaqa/resolve/main/config.json'''
),
'''allenai/longformer-base-4096-extra.pos.embd.only''': (
'''https://huggingface.co/allenai/longformer-base-4096-extra.pos.embd.only/resolve/main/config.json'''
),
'''allenai/longformer-large-4096-extra.pos.embd.only''': (
'''https://huggingface.co/allenai/longformer-large-4096-extra.pos.embd.only/resolve/main/config.json'''
),
}
class LongformerConfig (PretrainedConfig ):
    model_type = '''longformer'''
    def __init__( self , attention_window: Union[List[int], int] = 512 , sep_token_id: int = 2 , pad_token_id: int = 1 , bos_token_id: int = 0 , eos_token_id: int = 2 , vocab_size: int = 30_522 , hidden_size: int = 768 , num_hidden_layers: int = 12 , num_attention_heads: int = 12 , intermediate_size: int = 3_072 , hidden_act: str = "gelu" , hidden_dropout_prob: float = 0.1 , attention_probs_dropout_prob: float = 0.1 , max_position_embeddings: int = 512 , type_vocab_size: int = 2 , initializer_range: float = 0.0_2 , layer_norm_eps: float = 1e-12 , onnx_export: bool = False , **kwargs , ) -> Dict:
        super().__init__(pad_token_id=pad_token_id , **kwargs )
        self.attention_window = attention_window
        self.sep_token_id = sep_token_id
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.onnx_export = onnx_export
class LongformerOnnxConfig (OnnxConfig ):
    def __init__( self , config: "PretrainedConfig" , task: str = "default" , patching_specs: "List[PatchingSpec]" = None ) -> Union[str, Any]:
        super().__init__(config , task , patching_specs )
        config.onnx_export = True
@property
    def inputs( self ) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
return OrderedDict(
[
("input_ids", dynamic_axis),
("attention_mask", dynamic_axis),
("global_attention_mask", dynamic_axis),
] )
@property
    def outputs( self ) -> Mapping[str, Mapping[int, str]]:
        outputs = super().outputs
        if self.task == "default":
            outputs["pooler_output"] = {0: "batch"}
return outputs
@property
    def atol_for_validation( self ) -> float:
return 1e-4
@property
    def default_onnx_opset( self ) -> int:
# needs to be >= 14 to support tril operator
return max(super().default_onnx_opset , 14 )
    def generate_dummy_inputs( self , tokenizer: "PreTrainedTokenizerBase" , batch_size: int = -1 , seq_length: int = -1 , is_pair: bool = False , framework: Optional[TensorType] = None , ) -> Mapping[str, Any]:
        inputs = super().generate_dummy_inputs(
            preprocessor=tokenizer , batch_size=batch_size , seq_length=seq_length , is_pair=is_pair , framework=framework )
        import torch
        # for some reason, replacing this code by inputs["global_attention_mask"] = torch.randint(2, inputs["input_ids"].shape, dtype=torch.int64)
        # makes the export fail randomly
        global_attention_mask = torch.zeros_like(inputs["input_ids"] )
        # make every second token global
        global_attention_mask[:, ::2] = 1
        inputs["global_attention_mask"] = global_attention_mask
        return inputs
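# Hedged usage of the config above (instantiation only; the per-layer
# attention_window list below is illustrative):
if __name__ == "__main__":
    config = LongformerConfig(attention_window=[256] * 12)
    print(config.model_type, config.attention_window[:2])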
| 253 | 1 |
"""simple docstring"""
import unittest
from transformers import (
MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
TextClassificationPipeline,
pipeline,
)
from transformers.testing_utils import is_pipeline_test, nested_simplify, require_tf, require_torch, slow
from .test_pipelines_common import ANY
# These 2 model types require different inputs than those of the usual text models.
_a = {"""LayoutLMv2Config""", """LayoutLMv3Config"""}
@is_pipeline_test
class _UpperCAmelCase( unittest.TestCase ):
    model_mapping = MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
    tf_model_mapping = TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING

    if model_mapping is not None:
        model_mapping = {config: model for config, model in model_mapping.items() if config.__name__ not in _TO_SKIP}
    if tf_model_mapping is not None:
        tf_model_mapping = {
            config: model for config, model in tf_model_mapping.items() if config.__name__ not in _TO_SKIP
        }
@require_torch
def UpperCAmelCase ( self) -> List[str]:
'''simple docstring'''
_UpperCamelCase = pipeline(
task='''text-classification''' , model='''hf-internal-testing/tiny-random-distilbert''' , framework='''pt''')
_UpperCamelCase = text_classifier('''This is great !''')
self.assertEqual(nested_simplify(__a) , [{'''label''': '''LABEL_0''', '''score''': 0.504}])
_UpperCamelCase = text_classifier('''This is great !''' , top_k=2)
self.assertEqual(
nested_simplify(__a) , [{'''label''': '''LABEL_0''', '''score''': 0.504}, {'''label''': '''LABEL_1''', '''score''': 0.496}])
_UpperCamelCase = text_classifier(['''This is great !''', '''This is bad'''] , top_k=2)
self.assertEqual(
nested_simplify(__a) , [
[{'''label''': '''LABEL_0''', '''score''': 0.504}, {'''label''': '''LABEL_1''', '''score''': 0.496}],
[{'''label''': '''LABEL_0''', '''score''': 0.504}, {'''label''': '''LABEL_1''', '''score''': 0.496}],
] , )
_UpperCamelCase = text_classifier('''This is great !''' , top_k=1)
self.assertEqual(nested_simplify(__a) , [{'''label''': '''LABEL_0''', '''score''': 0.504}])
# Legacy behavior
        _UpperCamelCase = text_classifier('''This is great !''' , return_all_scores=False)
self.assertEqual(nested_simplify(__a) , [{'''label''': '''LABEL_0''', '''score''': 0.504}])
        _UpperCamelCase = text_classifier('''This is great !''' , return_all_scores=True)
self.assertEqual(
nested_simplify(__a) , [[{'''label''': '''LABEL_0''', '''score''': 0.504}, {'''label''': '''LABEL_1''', '''score''': 0.496}]])
        _UpperCamelCase = text_classifier(['''This is great !''', '''Something else'''] , return_all_scores=True)
self.assertEqual(
nested_simplify(__a) , [
[{'''label''': '''LABEL_0''', '''score''': 0.504}, {'''label''': '''LABEL_1''', '''score''': 0.496}],
[{'''label''': '''LABEL_0''', '''score''': 0.504}, {'''label''': '''LABEL_1''', '''score''': 0.496}],
] , )
        _UpperCamelCase = text_classifier(['''This is great !''', '''Something else'''] , return_all_scores=False)
self.assertEqual(
nested_simplify(__a) , [
{'''label''': '''LABEL_0''', '''score''': 0.504},
{'''label''': '''LABEL_0''', '''score''': 0.504},
] , )
@require_torch
def UpperCAmelCase ( self) -> Dict:
'''simple docstring'''
import torch
_UpperCamelCase = pipeline(
task='''text-classification''' , model='''hf-internal-testing/tiny-random-distilbert''' , framework='''pt''' , device=torch.device('''cpu''') , )
_UpperCamelCase = text_classifier('''This is great !''')
self.assertEqual(nested_simplify(__a) , [{'''label''': '''LABEL_0''', '''score''': 0.504}])
@require_tf
def UpperCAmelCase ( self) -> int:
'''simple docstring'''
_UpperCamelCase = pipeline(
task='''text-classification''' , model='''hf-internal-testing/tiny-random-distilbert''' , framework='''tf''')
_UpperCamelCase = text_classifier('''This is great !''')
self.assertEqual(nested_simplify(__a) , [{'''label''': '''LABEL_0''', '''score''': 0.504}])
@slow
@require_torch
def UpperCAmelCase ( self) -> Any:
'''simple docstring'''
_UpperCamelCase = pipeline('''text-classification''')
_UpperCamelCase = text_classifier('''This is great !''')
self.assertEqual(nested_simplify(__a) , [{'''label''': '''POSITIVE''', '''score''': 1.0}])
_UpperCamelCase = text_classifier('''This is bad !''')
self.assertEqual(nested_simplify(__a) , [{'''label''': '''NEGATIVE''', '''score''': 1.0}])
_UpperCamelCase = text_classifier('''Birds are a type of animal''')
self.assertEqual(nested_simplify(__a) , [{'''label''': '''POSITIVE''', '''score''': 0.988}])
@slow
@require_tf
def UpperCAmelCase ( self) -> int:
'''simple docstring'''
_UpperCamelCase = pipeline('''text-classification''' , framework='''tf''')
_UpperCamelCase = text_classifier('''This is great !''')
self.assertEqual(nested_simplify(__a) , [{'''label''': '''POSITIVE''', '''score''': 1.0}])
_UpperCamelCase = text_classifier('''This is bad !''')
self.assertEqual(nested_simplify(__a) , [{'''label''': '''NEGATIVE''', '''score''': 1.0}])
_UpperCamelCase = text_classifier('''Birds are a type of animal''')
self.assertEqual(nested_simplify(__a) , [{'''label''': '''POSITIVE''', '''score''': 0.988}])
def UpperCAmelCase ( self , __a , __a , __a) -> Optional[Any]:
'''simple docstring'''
_UpperCamelCase = TextClassificationPipeline(model=__a , tokenizer=__a)
return text_classifier, ["HuggingFace is in", "This is another test"]
def UpperCAmelCase ( self , __a , __a) -> Union[str, Any]:
'''simple docstring'''
_UpperCamelCase = text_classifier.model
# Small inputs because BartTokenizer tiny has maximum position embeddings = 22
_UpperCamelCase = '''HuggingFace is in'''
_UpperCamelCase = text_classifier(__a)
self.assertEqual(nested_simplify(__a) , [{'''label''': ANY(__a), '''score''': ANY(__a)}])
        self.assertTrue(outputs[0]['''label'''] in model.config.id2label.values())
_UpperCamelCase = ['''HuggingFace is in ''', '''Paris is in France''']
_UpperCamelCase = text_classifier(__a)
self.assertEqual(
nested_simplify(__a) , [{'''label''': ANY(__a), '''score''': ANY(__a)}, {'''label''': ANY(__a), '''score''': ANY(__a)}] , )
        self.assertTrue(outputs[0]['''label'''] in model.config.id2label.values())
        self.assertTrue(outputs[1]['''label'''] in model.config.id2label.values())
# Forcing to get all results with `top_k=None`
# This is NOT the legacy format
        _UpperCamelCase = text_classifier(__a , top_k=None)
        N = len(model.config.id2label.values())
self.assertEqual(
nested_simplify(__a) , [[{'''label''': ANY(__a), '''score''': ANY(__a)}] * N, [{'''label''': ANY(__a), '''score''': ANY(__a)}] * N] , )
_UpperCamelCase = {'''text''': '''HuggingFace is in ''', '''text_pair''': '''Paris is in France'''}
_UpperCamelCase = text_classifier(__a)
self.assertEqual(
nested_simplify(__a) , {'''label''': ANY(__a), '''score''': ANY(__a)} , )
        self.assertTrue(outputs['''label'''] in model.config.id2label.values())
# This might be used a text pair, but tokenizer + pipe interaction
# makes it hard to understand that it's not using the pair properly
# https://github.com/huggingface/transformers/issues/17305
# We disabled this usage instead as it was outputting wrong outputs.
_UpperCamelCase = [['''HuggingFace is in ''', '''Paris is in France''']]
        with self.assertRaises(ValueError):
text_classifier(__a)
# This used to be valid for doing text pairs
# We're keeping it working because of backward compatibility
_UpperCamelCase = text_classifier([[['''HuggingFace is in ''', '''Paris is in France''']]])
self.assertEqual(
nested_simplify(__a) , [{'''label''': ANY(__a), '''score''': ANY(__a)}] , )
        self.assertTrue(outputs[0]['''label'''] in model.config.id2label.values())
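# Stand-alone version of what these tests exercise (downloads a real SST-2
# checkpoint; exact scores will vary slightly between runs and versions):
if __name__ == "__main__":
    classifier = pipeline("text-classification", model="distilbert-base-uncased-finetuned-sst-2-english")
    print(classifier("This is great !", top_k=None))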
| 194 |
"""simple docstring"""
import json
import os
from pathlib import Path
import pytest
from datasets.download.download_config import DownloadConfig
from datasets.download.download_manager import DownloadManager
from datasets.utils.file_utils import hash_url_to_filename
_a = """http://www.mocksite.com/file1.txt"""
_a = """\"text\": [\"foo\", \"foo\"]"""
_a = """6d8ce9aa78a471c7477201efbeabd3bb01ac2e7d100a6dc024ba1608361f90a8"""
class MockResponse:
    status_code = 2_00
    headers = {'Content-Length': '100'}
    cookies = {}

    def iter_content( self , **kwargs) -> Optional[int]:
        '''simple docstring'''
        return [bytes(CONTENT , '''utf-8''')]
def mock_request( *args, **kwargs ) -> int:
    """simple docstring"""
    return MockResponse()
@pytest.mark.parametrize('''urls_type''', [str, list, dict] )
def test_download_manager ( urls_type, tmp_path, monkeypatch ) -> Dict:
    """simple docstring"""
    import requests
    monkeypatch.setattr(requests, '''request''', mock_request )
    url = URL
    if issubclass(urls_type, str ):
        urls = url
    elif issubclass(urls_type, list ):
        urls = [url]
    elif issubclass(urls_type, dict ):
        urls = {'''train''': url}
    dataset_name = '''dummy'''
    cache_subdir = '''downloads'''
    cache_dir_root = tmp_path
    download_config = DownloadConfig(
        cache_dir=os.path.join(cache_dir_root, cache_subdir ), use_etag=False, )
    dl_manager = DownloadManager(dataset_name=dataset_name, download_config=download_config )
    downloaded_paths = dl_manager.download(urls )
    input_urls = urls
    for downloaded_paths in [downloaded_paths]:
        if isinstance(urls, str ):
            downloaded_paths = [downloaded_paths]
            input_urls = [urls]
        elif isinstance(urls, dict ):
            assert "train" in downloaded_paths.keys()
            downloaded_paths = downloaded_paths.values()
            input_urls = urls.values()
        assert downloaded_paths
        for downloaded_path, input_url in zip(downloaded_paths, input_urls ):
            assert downloaded_path == dl_manager.downloaded_paths[input_url]
            downloaded_path = Path(downloaded_path )
            parts = downloaded_path.parts
            assert parts[-1] == HASH
            assert parts[-2] == cache_subdir
            assert downloaded_path.exists()
            content = downloaded_path.read_text()
            assert content == CONTENT
            metadata_downloaded_path = downloaded_path.with_suffix('''.json''' )
            assert metadata_downloaded_path.exists()
            metadata_content = json.loads(metadata_downloaded_path.read_text() )
            assert metadata_content == {"url": URL, "etag": None}
@pytest.mark.parametrize('''paths_type''', [str, list, dict] )
def lowerCamelCase__ ( __snake_case, __snake_case, __snake_case ) -> List[str]:
"""simple docstring"""
_UpperCamelCase = str(__snake_case )
if issubclass(__snake_case, __snake_case ):
_UpperCamelCase = filename
elif issubclass(__snake_case, __snake_case ):
_UpperCamelCase = [filename]
elif issubclass(__snake_case, __snake_case ):
_UpperCamelCase = {'''train''': filename}
_UpperCamelCase = '''dummy'''
_UpperCamelCase = xz_file.parent
_UpperCamelCase = '''extracted'''
_UpperCamelCase = DownloadConfig(
cache_dir=__snake_case, use_etag=__snake_case, )
_UpperCamelCase = DownloadManager(dataset_name=__snake_case, download_config=__snake_case )
_UpperCamelCase = dl_manager.extract(__snake_case )
_UpperCamelCase = paths
for extracted_paths in [extracted_paths]:
if isinstance(__snake_case, __snake_case ):
_UpperCamelCase = [extracted_paths]
_UpperCamelCase = [paths]
elif isinstance(__snake_case, __snake_case ):
assert "train" in extracted_paths.keys()
_UpperCamelCase = extracted_paths.values()
_UpperCamelCase = paths.values()
assert extracted_paths
for extracted_path, input_path in zip(__snake_case, __snake_case ):
assert extracted_path == dl_manager.extracted_paths[input_path]
_UpperCamelCase = Path(__snake_case )
_UpperCamelCase = extracted_path.parts
assert parts[-1] == hash_url_to_filename(__snake_case, etag=__snake_case )
assert parts[-2] == extracted_subdir
assert extracted_path.exists()
_UpperCamelCase = extracted_path.read_text()
_UpperCamelCase = text_file.read_text()
assert extracted_file_content == expected_file_content
def lowerCamelCase__ ( __snake_case, __snake_case ) -> Optional[Any]:
"""simple docstring"""
assert path.endswith('''.jsonl''' )
for num_items, line in enumerate(__snake_case, start=1 ):
_UpperCamelCase = json.loads(line.decode('''utf-8''' ) )
assert item.keys() == {"col_1", "col_2", "col_3"}
assert num_items == 4
@pytest.mark.parametrize('''archive_jsonl''', ['''tar_jsonl_path''', '''zip_jsonl_path'''] )
def lowerCamelCase__ ( __snake_case, __snake_case ) -> int:
"""simple docstring"""
_UpperCamelCase = request.getfixturevalue(__snake_case )
_UpperCamelCase = DownloadManager()
for num_jsonl, (path, file) in enumerate(dl_manager.iter_archive(__snake_case ), start=1 ):
_test_jsonl(__snake_case, __snake_case )
assert num_jsonl == 2
@pytest.mark.parametrize('''archive_nested_jsonl''', ['''tar_nested_jsonl_path''', '''zip_nested_jsonl_path'''] )
def lowerCamelCase__ ( __snake_case, __snake_case ) -> int:
"""simple docstring"""
_UpperCamelCase = request.getfixturevalue(__snake_case )
_UpperCamelCase = DownloadManager()
for num_tar, (path, file) in enumerate(dl_manager.iter_archive(__snake_case ), start=1 ):
for num_jsonl, (subpath, subfile) in enumerate(dl_manager.iter_archive(__snake_case ), start=1 ):
_test_jsonl(__snake_case, __snake_case )
assert num_tar == 1
assert num_jsonl == 2
def lowerCamelCase__ ( __snake_case ) -> Dict:
"""simple docstring"""
_UpperCamelCase = DownloadManager()
for num_file, file in enumerate(dl_manager.iter_files(__snake_case ), start=1 ):
assert os.path.basename(__snake_case ) == ("test.txt" if num_file == 1 else "train.txt")
assert num_file == 2
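# Hedged sketch (not in the source): with use_etag disabled, datasets names a
# cached download after the SHA-256 hex digest of its URL, which is what the
# HASH constant above should encode.
import hashlib
assert hashlib.sha256(URL.encode("utf-8")).hexdigest() == HASH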
| 194 | 1 |
"""simple docstring"""
import unittest
from diffusers import FlaxAutoencoderKL
from diffusers.utils import is_flax_available
from diffusers.utils.testing_utils import require_flax
from .test_modeling_common_flax import FlaxModelTesterMixin
if is_flax_available():
import jax
@require_flax
class _UpperCAmelCase ( a__, unittest.TestCase ):
'''simple docstring'''
lowerCamelCase__ =FlaxAutoencoderKL
@property
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
__snake_case : Dict = 4
__snake_case : Dict = 3
__snake_case : Optional[int] = (32, 32)
__snake_case : Optional[int] = jax.random.PRNGKey(0 )
__snake_case : Union[str, Any] = jax.random.uniform(SCREAMING_SNAKE_CASE_ , ((batch_size, num_channels) + sizes) )
return {"sample": image, "prng_key": prng_key}
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
__snake_case : Optional[Any] = {
'block_out_channels': [32, 64],
'in_channels': 3,
'out_channels': 3,
'down_block_types': ['DownEncoderBlock2D', 'DownEncoderBlock2D'],
'up_block_types': ['UpDecoderBlock2D', 'UpDecoderBlock2D'],
'latent_channels': 4,
}
__snake_case : Dict = self.dummy_input
return init_dict, inputs_dict
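# Deobfuscated reading of the dummy_input property above (variable names are
# assumptions): a PRNG key plus a uniform (batch, channels, height, width) sample.
import jax
batch_size, num_channels, sizes = 4, 3, (32, 32)
prng_key = jax.random.PRNGKey(0)
image = jax.random.uniform(prng_key, (batch_size, num_channels) + sizes)
print(image.shape)  # (4, 3, 32, 32)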
| 364 |
"""simple docstring"""
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from tokenizers import processors
from ...tokenization_utils import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_mbart import MBartTokenizer
else:
SCREAMING_SNAKE_CASE : Optional[int] = None
SCREAMING_SNAKE_CASE : Any = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE : int = {"""vocab_file""": """sentencepiece.bpe.model""", """tokenizer_file""": """tokenizer.json"""}
SCREAMING_SNAKE_CASE : List[Any] = {
"""vocab_file""": {
"""facebook/mbart-large-en-ro""": (
"""https://huggingface.co/facebook/mbart-large-en-ro/resolve/main/sentencepiece.bpe.model"""
),
"""facebook/mbart-large-cc25""": (
"""https://huggingface.co/facebook/mbart-large-cc25/resolve/main/sentencepiece.bpe.model"""
),
},
"""tokenizer_file""": {
"""facebook/mbart-large-en-ro""": """https://huggingface.co/facebook/mbart-large-en-ro/resolve/main/tokenizer.json""",
"""facebook/mbart-large-cc25""": """https://huggingface.co/facebook/mbart-large-cc25/resolve/main/tokenizer.json""",
},
}
SCREAMING_SNAKE_CASE : Tuple = {
"""facebook/mbart-large-en-ro""": 1024,
"""facebook/mbart-large-cc25""": 1024,
}
# fmt: off
SCREAMING_SNAKE_CASE : List[Any] = ["""ar_AR""", """cs_CZ""", """de_DE""", """en_XX""", """es_XX""", """et_EE""", """fi_FI""", """fr_XX""", """gu_IN""", """hi_IN""", """it_IT""", """ja_XX""", """kk_KZ""", """ko_KR""", """lt_LT""", """lv_LV""", """my_MM""", """ne_NP""", """nl_XX""", """ro_RO""", """ru_RU""", """si_LK""", """tr_TR""", """vi_VN""", """zh_CN"""]
class _UpperCAmelCase ( __snake_case ):
'''simple docstring'''
lowerCamelCase__ =VOCAB_FILES_NAMES
lowerCamelCase__ =PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowerCamelCase__ =PRETRAINED_VOCAB_FILES_MAP
lowerCamelCase__ =['input_ids', 'attention_mask']
lowerCamelCase__ =MBartTokenizer
lowerCamelCase__ =[]
lowerCamelCase__ =[]
def __init__(self , a_=None , a_=None , a_="<s>" , a_="</s>" , a_="</s>" , a_="<s>" , a_="<unk>" , a_="<pad>" , a_="<mask>" , a_=None , a_=None , a_=None , **a_ , ):
'''simple docstring'''
__snake_case : Optional[int] = AddedToken(a_ , lstrip=a_ , rstrip=a_ ) if isinstance(a_ , a_ ) else mask_token
super().__init__(
vocab_file=a_ , tokenizer_file=a_ , bos_token=a_ , eos_token=a_ , sep_token=a_ , cls_token=a_ , unk_token=a_ , pad_token=a_ , mask_token=a_ , src_lang=a_ , tgt_lang=a_ , additional_special_tokens=a_ , **a_ , )
__snake_case : Tuple = vocab_file
__snake_case : Optional[Any] = False if not self.vocab_file else True
__snake_case : Dict = FAIRSEQ_LANGUAGE_CODES.copy()
if additional_special_tokens is not None:
# Only add those special tokens if they are not already there.
_additional_special_tokens.extend(
[t for t in additional_special_tokens if t not in _additional_special_tokens] )
self.add_special_tokens({'''additional_special_tokens''': _additional_special_tokens} )
__snake_case : Optional[int] = {
lang_code: self.convert_tokens_to_ids(a_ ) for lang_code in FAIRSEQ_LANGUAGE_CODES
}
__snake_case : List[Any] = src_lang if src_lang is not None else '''en_XX'''
__snake_case : Any = self.convert_tokens_to_ids(self._src_lang )
__snake_case : Dict = tgt_lang
self.set_src_lang_special_tokens(self._src_lang )
@property
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
return self._src_lang
@src_lang.setter
def SCREAMING_SNAKE_CASE (self , a_ ):
'''simple docstring'''
__snake_case : Tuple = new_src_lang
self.set_src_lang_special_tokens(self._src_lang )
def SCREAMING_SNAKE_CASE (self , token_ids_0 , token_ids_1 = None ):
    '''simple docstring'''
    if token_ids_1 is None:
        return self.prefix_tokens + token_ids_0 + self.suffix_tokens
    # We don't expect to process pairs, but leave the pair logic for API consistency
    return self.prefix_tokens + token_ids_0 + token_ids_1 + self.suffix_tokens
def SCREAMING_SNAKE_CASE (self , token_ids_0 , token_ids_1 = None ):
    '''simple docstring'''
    sep = [self.sep_token_id]
    cls = [self.cls_token_id]
    if token_ids_1 is None:
        return len(cls + token_ids_0 + sep ) * [0]
    return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep ) * [0]
def SCREAMING_SNAKE_CASE (self , a_ , a_ , a_ , a_ , **a_ ):
'''simple docstring'''
if src_lang is None or tgt_lang is None:
raise ValueError('''Translation requires a `src_lang` and a `tgt_lang` for this model''' )
__snake_case : Optional[int] = src_lang
__snake_case : Tuple = self(a_ , add_special_tokens=a_ , return_tensors=a_ , **a_ )
__snake_case : Union[str, Any] = self.convert_tokens_to_ids(a_ )
__snake_case : int = tgt_lang_id
return inputs
def SCREAMING_SNAKE_CASE (self , a_ , a_ = "en_XX" , a_ = None , a_ = "ro_RO" , **a_ , ):
'''simple docstring'''
__snake_case : int = src_lang
__snake_case : List[Any] = tgt_lang
return super().prepare_seq2seq_batch(a_ , a_ , **a_ )
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
return self.set_src_lang_special_tokens(self.src_lang )
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
return self.set_tgt_lang_special_tokens(self.tgt_lang )
def SCREAMING_SNAKE_CASE (self , a_ ):
'''simple docstring'''
__snake_case : int = self.convert_tokens_to_ids(a_ )
__snake_case : List[Any] = []
__snake_case : Any = [self.eos_token_id, self.cur_lang_code]
__snake_case : List[str] = self.convert_ids_to_tokens(self.prefix_tokens )
__snake_case : Dict = self.convert_ids_to_tokens(self.suffix_tokens )
__snake_case : Any = processors.TemplateProcessing(
single=prefix_tokens_str + ['''$A'''] + suffix_tokens_str , pair=prefix_tokens_str + ['''$A''', '''$B'''] + suffix_tokens_str , special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str , self.prefix_tokens + self.suffix_tokens ) ) , )
def SCREAMING_SNAKE_CASE (self , a_ ):
'''simple docstring'''
__snake_case : int = self.convert_tokens_to_ids(a_ )
__snake_case : Optional[Any] = []
__snake_case : Dict = [self.eos_token_id, self.cur_lang_code]
__snake_case : str = self.convert_ids_to_tokens(self.prefix_tokens )
__snake_case : Any = self.convert_ids_to_tokens(self.suffix_tokens )
__snake_case : Tuple = processors.TemplateProcessing(
single=prefix_tokens_str + ['''$A'''] + suffix_tokens_str , pair=prefix_tokens_str + ['''$A''', '''$B'''] + suffix_tokens_str , special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str , self.prefix_tokens + self.suffix_tokens ) ) , )
def SCREAMING_SNAKE_CASE (self , a_ , a_ = None ):
'''simple docstring'''
if not self.can_save_slow_tokenizer:
raise ValueError(
'''Your fast tokenizer does not have the necessary information to save the vocabulary for a slow '''
'''tokenizer.''' )
if not os.path.isdir(a_ ):
logger.error(f"""Vocabulary path ({save_directory}) should be a directory.""" )
return
__snake_case : Optional[Any] = os.path.join(
a_ , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(a_ ):
copyfile(self.vocab_file , a_ )
return (out_vocab_file,)
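# Hedged usage sketch of the language-code handling above: this tokenizer keeps
# prefix_tokens empty and appends [</s>, lang_code] as suffix_tokens.
from transformers import MBartTokenizerFast
tok = MBartTokenizerFast.from_pretrained("facebook/mbart-large-en-ro", src_lang="en_XX", tgt_lang="ro_RO")
ids = tok("UN Chief Says There Is No Plan to Stop War in Syria")["input_ids"]
print(tok.convert_ids_to_tokens(ids)[-2:])  # expected: ['</s>', 'en_XX']
with tok.as_target_tokenizer():  # switches the suffix to the target language code
    labels = tok("Şeful ONU declară că nu există o soluţie militară în Siria")["input_ids"]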
| 24 | 0 |
"""simple docstring"""
import unittest
import numpy as np
import timeout_decorator # noqa
from transformers import BlenderbotConfig, is_flax_available
from transformers.testing_utils import jax_device, require_flax, slow
from ...generation.test_flax_utils import FlaxGenerationTesterMixin
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor
if is_flax_available():
import os
# The slow tests are often failing with OOM error on GPU
# This makes JAX allocate exactly what is needed on demand, and deallocate memory that is no longer needed
# but will be slower as stated here https://jax.readthedocs.io/en/latest/gpu_memory_allocation.html
__A = "platform"
import jax
import jax.numpy as jnp
from transformers import BlenderbotTokenizer
from transformers.models.blenderbot.modeling_flax_blenderbot import (
FlaxBlenderbotForConditionalGeneration,
FlaxBlenderbotModel,
shift_tokens_right,
)
def a__ ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=None , __SCREAMING_SNAKE_CASE=None , __SCREAMING_SNAKE_CASE=None , __SCREAMING_SNAKE_CASE=None , __SCREAMING_SNAKE_CASE=None , __SCREAMING_SNAKE_CASE=None , ) -> Union[str, Any]:
if attention_mask is None:
__lowerCAmelCase: List[Any] = np.where(input_ids != config.pad_token_id , 1 , 0 )
if decoder_attention_mask is None:
__lowerCAmelCase: Dict = np.where(decoder_input_ids != config.pad_token_id , 1 , 0 )
if head_mask is None:
__lowerCAmelCase: Any = np.ones((config.encoder_layers, config.encoder_attention_heads) )
if decoder_head_mask is None:
__lowerCAmelCase: Optional[int] = np.ones((config.decoder_layers, config.decoder_attention_heads) )
if cross_attn_head_mask is None:
__lowerCAmelCase: Tuple = np.ones((config.decoder_layers, config.decoder_attention_heads) )
return {
"input_ids": input_ids,
"decoder_input_ids": decoder_input_ids,
"attention_mask": attention_mask,
"decoder_attention_mask": attention_mask,
}
class snake_case :
def __init__( self : List[str] , UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : Optional[int]=1_3 , UpperCamelCase__ : List[str]=7 , UpperCamelCase__ : Optional[Any]=True , UpperCamelCase__ : int=False , UpperCamelCase__ : List[Any]=9_9 , UpperCamelCase__ : List[Any]=1_6 , UpperCamelCase__ : Dict=2 , UpperCamelCase__ : List[str]=4 , UpperCamelCase__ : Union[str, Any]=4 , UpperCamelCase__ : Any="gelu" , UpperCamelCase__ : str=0.1 , UpperCamelCase__ : Optional[Any]=0.1 , UpperCamelCase__ : str=3_2 , UpperCamelCase__ : Optional[int]=2 , UpperCamelCase__ : str=1 , UpperCamelCase__ : Optional[Any]=0 , UpperCamelCase__ : List[str]=0.02 , )-> Any:
'''simple docstring'''
__lowerCAmelCase: Any = parent
__lowerCAmelCase: Dict = batch_size
__lowerCAmelCase: Any = seq_length
__lowerCAmelCase: Union[str, Any] = is_training
__lowerCAmelCase: Optional[Any] = use_labels
__lowerCAmelCase: List[str] = vocab_size
__lowerCAmelCase: Optional[int] = hidden_size
__lowerCAmelCase: List[str] = num_hidden_layers
__lowerCAmelCase: Optional[int] = num_attention_heads
__lowerCAmelCase: Optional[Any] = intermediate_size
__lowerCAmelCase: List[str] = hidden_act
__lowerCAmelCase: Tuple = hidden_dropout_prob
__lowerCAmelCase: Any = attention_probs_dropout_prob
__lowerCAmelCase: Dict = max_position_embeddings
__lowerCAmelCase: int = eos_token_id
__lowerCAmelCase: Dict = pad_token_id
__lowerCAmelCase: Optional[Any] = bos_token_id
__lowerCAmelCase: str = initializer_range
def lowercase_ ( self : List[str])-> Any:
'''simple docstring'''
__lowerCAmelCase: Any = np.clip(ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size) , 3 , self.vocab_size)
__lowerCAmelCase: List[Any] = np.concatenate((input_ids, 2 * np.ones((self.batch_size, 1) , dtype=np.int64)) , -1)
__lowerCAmelCase: List[str] = shift_tokens_right(_SCREAMING_SNAKE_CASE , 1 , 2)
__lowerCAmelCase: List[Any] = BlenderbotConfig(
vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_id=self.eos_token_id , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , initializer_range=self.initializer_range , use_cache=_SCREAMING_SNAKE_CASE , )
__lowerCAmelCase: Optional[Any] = prepare_blenderbot_inputs_dict(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE)
return config, inputs_dict
def lowercase_ ( self : List[str])-> Union[str, Any]:
'''simple docstring'''
__lowerCAmelCase: Tuple = self.prepare_config_and_inputs()
return config, inputs_dict
def lowercase_ ( self : List[Any] , UpperCamelCase__ : str , UpperCamelCase__ : int , UpperCamelCase__ : Optional[Any])-> str:
'''simple docstring'''
__lowerCAmelCase: str = 2_0
__lowerCAmelCase: Dict = model_class_name(_SCREAMING_SNAKE_CASE)
__lowerCAmelCase: Optional[Any] = model.encode(inputs_dict["input_ids"])
__lowerCAmelCase: Union[str, Any] = (
inputs_dict['''decoder_input_ids'''],
inputs_dict['''decoder_attention_mask'''],
)
__lowerCAmelCase: str = model.init_cache(decoder_input_ids.shape[0] , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE)
__lowerCAmelCase: int = jnp.ones((decoder_input_ids.shape[0], max_decoder_length) , dtype="i4")
__lowerCAmelCase: Any = jnp.broadcast_to(
jnp.arange(decoder_input_ids.shape[-1] - 1)[None, :] , (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1) , )
__lowerCAmelCase: Dict = model.decode(
decoder_input_ids[:, :-1] , _SCREAMING_SNAKE_CASE , decoder_attention_mask=_SCREAMING_SNAKE_CASE , past_key_values=_SCREAMING_SNAKE_CASE , decoder_position_ids=_SCREAMING_SNAKE_CASE , )
__lowerCAmelCase: Union[str, Any] = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]] , dtype="i4")
__lowerCAmelCase: Tuple = model.decode(
decoder_input_ids[:, -1:] , _SCREAMING_SNAKE_CASE , decoder_attention_mask=_SCREAMING_SNAKE_CASE , past_key_values=outputs_cache.past_key_values , decoder_position_ids=_SCREAMING_SNAKE_CASE , )
__lowerCAmelCase: str = model.decode(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE)
__lowerCAmelCase: Union[str, Any] = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5])))
self.parent.assertTrue(diff < 1e-3 , msg=f"Max diff is {diff}")
def lowercase_ ( self : List[str] , UpperCamelCase__ : int , UpperCamelCase__ : Union[str, Any] , UpperCamelCase__ : int)-> Tuple:
'''simple docstring'''
__lowerCAmelCase: int = 2_0
__lowerCAmelCase: Tuple = model_class_name(_SCREAMING_SNAKE_CASE)
__lowerCAmelCase: Any = model.encode(inputs_dict["input_ids"])
__lowerCAmelCase: Dict = (
inputs_dict['''decoder_input_ids'''],
inputs_dict['''decoder_attention_mask'''],
)
__lowerCAmelCase: Optional[Any] = jnp.concatenate(
[
decoder_attention_mask,
jnp.zeros((decoder_attention_mask.shape[0], max_decoder_length - decoder_attention_mask.shape[1])),
] , axis=-1 , )
__lowerCAmelCase: Tuple = model.init_cache(decoder_input_ids.shape[0] , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE)
__lowerCAmelCase: str = jnp.broadcast_to(
jnp.arange(decoder_input_ids.shape[-1] - 1)[None, :] , (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1) , )
__lowerCAmelCase: Optional[Any] = model.decode(
decoder_input_ids[:, :-1] , _SCREAMING_SNAKE_CASE , decoder_attention_mask=_SCREAMING_SNAKE_CASE , past_key_values=_SCREAMING_SNAKE_CASE , decoder_position_ids=_SCREAMING_SNAKE_CASE , )
__lowerCAmelCase: Optional[int] = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]] , dtype="i4")
__lowerCAmelCase: str = model.decode(
decoder_input_ids[:, -1:] , _SCREAMING_SNAKE_CASE , past_key_values=outputs_cache.past_key_values , decoder_attention_mask=_SCREAMING_SNAKE_CASE , decoder_position_ids=_SCREAMING_SNAKE_CASE , )
__lowerCAmelCase: Union[str, Any] = model.decode(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , decoder_attention_mask=_SCREAMING_SNAKE_CASE)
__lowerCAmelCase: Any = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5])))
self.parent.assertTrue(diff < 1e-3 , msg=f"Max diff is {diff}")
@require_flax
class snake_case ( unittest.TestCase ):
SCREAMING_SNAKE_CASE_ : Optional[int] = 99
def lowercase_ ( self : int)-> Tuple:
'''simple docstring'''
__lowerCAmelCase: Any = np.array(
[
[7_1, 8_2, 1_8, 3_3, 4_6, 9_1, 2],
[6_8, 3_4, 2_6, 5_8, 3_0, 8_2, 2],
[5, 9_7, 1_7, 3_9, 9_4, 4_0, 2],
[7_6, 8_3, 9_4, 2_5, 7_0, 7_8, 2],
[8_7, 5_9, 4_1, 3_5, 4_8, 6_6, 2],
[5_5, 1_3, 1_6, 5_8, 5, 2, 1], # note padding
[6_4, 2_7, 3_1, 5_1, 1_2, 7_5, 2],
[5_2, 6_4, 8_6, 1_7, 8_3, 3_9, 2],
[4_8, 6_1, 9, 2_4, 7_1, 8_2, 2],
[2_6, 1, 6_0, 4_8, 2_2, 1_3, 2],
[2_1, 5, 6_2, 2_8, 1_4, 7_6, 2],
[4_5, 9_8, 3_7, 8_6, 5_9, 4_8, 2],
[7_0, 7_0, 5_0, 9, 2_8, 0, 2],
] , dtype=np.int64 , )
__lowerCAmelCase: Optional[Any] = input_ids.shape[0]
__lowerCAmelCase: Optional[Any] = BlenderbotConfig(
vocab_size=self.vocab_size , d_model=2_4 , encoder_layers=2 , decoder_layers=2 , encoder_attention_heads=2 , decoder_attention_heads=2 , encoder_ffn_dim=3_2 , decoder_ffn_dim=3_2 , max_position_embeddings=4_8 , eos_token_id=2 , pad_token_id=1 , bos_token_id=0 , )
return config, input_ids, batch_size
def lowercase_ ( self : List[str])-> Any:
'''simple docstring'''
__lowerCAmelCase: List[str] = self._get_config_and_data()
__lowerCAmelCase: Dict = FlaxBlenderbotForConditionalGeneration(_SCREAMING_SNAKE_CASE)
__lowerCAmelCase: Dict = lm_model(input_ids=_SCREAMING_SNAKE_CASE)
__lowerCAmelCase: Union[str, Any] = (batch_size, input_ids.shape[1], config.vocab_size)
self.assertEqual(outputs["logits"].shape , _SCREAMING_SNAKE_CASE)
def lowercase_ ( self : str)-> Any:
'''simple docstring'''
__lowerCAmelCase: Union[str, Any] = BlenderbotConfig(
vocab_size=self.vocab_size , d_model=1_4 , encoder_layers=2 , decoder_layers=2 , encoder_attention_heads=2 , decoder_attention_heads=2 , encoder_ffn_dim=8 , decoder_ffn_dim=8 , max_position_embeddings=4_8 , )
__lowerCAmelCase: Dict = FlaxBlenderbotForConditionalGeneration(_SCREAMING_SNAKE_CASE)
__lowerCAmelCase: Any = np.array([[7_1, 8_2, 1_8, 3_3, 4_6, 9_1, 2], [6_8, 3_4, 2_6, 5_8, 3_0, 2, 1]] , dtype=np.int64)
__lowerCAmelCase: str = np.array([[8_2, 7_1, 8_2, 1_8, 2], [5_8, 6_8, 2, 1, 1]] , dtype=np.int64)
__lowerCAmelCase: str = lm_model(input_ids=_SCREAMING_SNAKE_CASE , decoder_input_ids=_SCREAMING_SNAKE_CASE)
__lowerCAmelCase: Tuple = (*summary.shape, config.vocab_size)
self.assertEqual(outputs["logits"].shape , _SCREAMING_SNAKE_CASE)
def lowercase_ ( self : int)-> Dict:
'''simple docstring'''
__lowerCAmelCase: Union[str, Any] = np.array([[7_1, 8_2, 1_8, 3_3, 2, 1, 1], [6_8, 3_4, 2_6, 5_8, 3_0, 8_2, 2]] , dtype=np.int64)
__lowerCAmelCase: int = shift_tokens_right(_SCREAMING_SNAKE_CASE , 1 , 2)
__lowerCAmelCase: int = np.equal(_SCREAMING_SNAKE_CASE , 1).astype(np.float32).sum()
__lowerCAmelCase: int = np.equal(_SCREAMING_SNAKE_CASE , 1).astype(np.float32).sum()
self.assertEqual(shifted.shape , input_ids.shape)
self.assertEqual(_SCREAMING_SNAKE_CASE , n_pad_before - 1)
self.assertTrue(np.equal(shifted[:, 0] , 2).all())
@require_flax
class snake_case ( _lowercase, unittest.TestCase, _lowercase ):
SCREAMING_SNAKE_CASE_ : Optional[int] = True
SCREAMING_SNAKE_CASE_ : List[str] = (
(
FlaxBlenderbotModel,
FlaxBlenderbotForConditionalGeneration,
)
if is_flax_available()
else ()
)
SCREAMING_SNAKE_CASE_ : Dict = (FlaxBlenderbotForConditionalGeneration,) if is_flax_available() else ()
def lowercase_ ( self : Optional[Any])-> Any:
'''simple docstring'''
__lowerCAmelCase: int = FlaxBlenderbotModelTester(self)
def lowercase_ ( self : int)-> Optional[Any]:
'''simple docstring'''
__lowerCAmelCase: str = self.model_tester.prepare_config_and_inputs()
for model_class in self.all_model_classes:
self.model_tester.check_use_cache_forward(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE)
def lowercase_ ( self : List[Any])-> Tuple:
'''simple docstring'''
__lowerCAmelCase: Dict = self.model_tester.prepare_config_and_inputs()
for model_class in self.all_model_classes:
self.model_tester.check_use_cache_forward_with_attn_mask(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE)
def lowercase_ ( self : Tuple)-> Any:
'''simple docstring'''
__lowerCAmelCase: Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__):
__lowerCAmelCase: Tuple = self._prepare_for_class(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE)
__lowerCAmelCase: int = model_class(_SCREAMING_SNAKE_CASE)
@jax.jit
def encode_jitted(UpperCamelCase__ : Union[str, Any] , UpperCamelCase__ : List[str]=None , **UpperCamelCase__ : List[Any]):
return model.encode(input_ids=_SCREAMING_SNAKE_CASE , attention_mask=_SCREAMING_SNAKE_CASE)
with self.subTest("JIT Enabled"):
__lowerCAmelCase: str = encode_jitted(**_SCREAMING_SNAKE_CASE).to_tuple()
with self.subTest("JIT Disabled"):
with jax.disable_jit():
__lowerCAmelCase: Any = encode_jitted(**_SCREAMING_SNAKE_CASE).to_tuple()
self.assertEqual(len(_SCREAMING_SNAKE_CASE) , len(_SCREAMING_SNAKE_CASE))
for jitted_output, output in zip(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE):
self.assertEqual(jitted_output.shape , output.shape)
def lowercase_ ( self : Optional[Any])-> Union[str, Any]:
'''simple docstring'''
__lowerCAmelCase: str = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__):
__lowerCAmelCase: Dict = model_class(_SCREAMING_SNAKE_CASE)
__lowerCAmelCase: Any = model.encode(inputs_dict["input_ids"] , inputs_dict["attention_mask"])
__lowerCAmelCase: str = {
'''decoder_input_ids''': inputs_dict['''decoder_input_ids'''],
'''decoder_attention_mask''': inputs_dict['''decoder_attention_mask'''],
'''encoder_outputs''': encoder_outputs,
}
@jax.jit
def decode_jitted(UpperCamelCase__ : Any , UpperCamelCase__ : Optional[int] , UpperCamelCase__ : Any):
return model.decode(
decoder_input_ids=_SCREAMING_SNAKE_CASE , decoder_attention_mask=_SCREAMING_SNAKE_CASE , encoder_outputs=_SCREAMING_SNAKE_CASE , )
with self.subTest("JIT Enabled"):
__lowerCAmelCase: List[Any] = decode_jitted(**_SCREAMING_SNAKE_CASE).to_tuple()
with self.subTest("JIT Disabled"):
with jax.disable_jit():
__lowerCAmelCase: Optional[Any] = decode_jitted(**_SCREAMING_SNAKE_CASE).to_tuple()
self.assertEqual(len(_SCREAMING_SNAKE_CASE) , len(_SCREAMING_SNAKE_CASE))
for jitted_output, output in zip(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE):
self.assertEqual(jitted_output.shape , output.shape)
@slow
def lowercase_ ( self : Dict)-> List[str]:
'''simple docstring'''
for model_class_name in self.all_model_classes:
__lowerCAmelCase: List[Any] = model_class_name.from_pretrained("facebook/blenderbot-400M-distill")
# FlaxBlenderbotForSequenceClassification expects eos token in input_ids
__lowerCAmelCase: List[str] = np.ones((1, 1)) * model.config.eos_token_id
__lowerCAmelCase: Dict = model(_SCREAMING_SNAKE_CASE)
self.assertIsNotNone(_SCREAMING_SNAKE_CASE)
@unittest.skipUnless(jax_device != "cpu" , "3B test too slow on CPU.")
@slow
def lowercase_ ( self : List[Any])-> Union[str, Any]:
'''simple docstring'''
__lowerCAmelCase: Optional[int] = {'''num_beams''': 1, '''early_stopping''': True, '''min_length''': 1_5, '''max_length''': 2_5}
__lowerCAmelCase: Tuple = {'''skip_special_tokens''': True, '''clean_up_tokenization_spaces''': True}
__lowerCAmelCase: Optional[int] = FlaxBlenderbotForConditionalGeneration.from_pretrained("facebook/blenderbot-3B" , from_pt=_SCREAMING_SNAKE_CASE)
__lowerCAmelCase: Tuple = BlenderbotTokenizer.from_pretrained("facebook/blenderbot-3B")
__lowerCAmelCase: List[Any] = ['''Sam''']
__lowerCAmelCase: Dict = tokenizer(_SCREAMING_SNAKE_CASE , return_tensors="jax")
__lowerCAmelCase: List[str] = model.generate(**_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE)
__lowerCAmelCase: Tuple = '''Sam is a great name. It means "sun" in Gaelic.'''
__lowerCAmelCase: Union[str, Any] = tokenizer.batch_decode(_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE)
assert generated_txt[0].strip() == tgt_text
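# Hedged sketch (illustrative values) of the position-id bookkeeping the cache
# tests above rely on: one batched pass over tokens [:, :-1], then the final
# token with its scalar position, mirroring the jnp.broadcast_to/jnp.array lines.
import numpy as np
import jax.numpy as jnp
decoder_input_ids = np.array([[0, 5, 7, 9], [0, 4, 6, 2]])
first_positions = jnp.broadcast_to(
    jnp.arange(decoder_input_ids.shape[-1] - 1)[None, :],
    (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1),
)
last_position = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]], dtype="i4")
print(first_positions.shape, last_position.shape)  # (2, 3) (2, 1)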
| 217 |
import unittest
import numpy as np
from transformers import DistilBertConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax.numpy as jnp
from transformers.models.distilbert.modeling_flax_distilbert import (
FlaxDistilBertForMaskedLM,
FlaxDistilBertForMultipleChoice,
FlaxDistilBertForQuestionAnswering,
FlaxDistilBertForSequenceClassification,
FlaxDistilBertForTokenClassification,
FlaxDistilBertModel,
)
class _a ( unittest.TestCase):
def __init__( self : int , _SCREAMING_SNAKE_CASE : Any , _SCREAMING_SNAKE_CASE : str=13 , _SCREAMING_SNAKE_CASE : List[str]=7 , _SCREAMING_SNAKE_CASE : List[Any]=True , _SCREAMING_SNAKE_CASE : Dict=True , _SCREAMING_SNAKE_CASE : Optional[int]=True , _SCREAMING_SNAKE_CASE : Union[str, Any]=True , _SCREAMING_SNAKE_CASE : Optional[int]=99 , _SCREAMING_SNAKE_CASE : Optional[int]=32 , _SCREAMING_SNAKE_CASE : Union[str, Any]=5 , _SCREAMING_SNAKE_CASE : Optional[int]=4 , _SCREAMING_SNAKE_CASE : Optional[int]=37 , _SCREAMING_SNAKE_CASE : Any="gelu" , _SCREAMING_SNAKE_CASE : Tuple=0.1 , _SCREAMING_SNAKE_CASE : Any=0.1 , _SCREAMING_SNAKE_CASE : Tuple=512 , _SCREAMING_SNAKE_CASE : Optional[int]=16 , _SCREAMING_SNAKE_CASE : Optional[int]=2 , _SCREAMING_SNAKE_CASE : Union[str, Any]=0.02 , _SCREAMING_SNAKE_CASE : Tuple=4 , )-> Optional[int]:
lowerCAmelCase__ : Optional[int] = parent
lowerCAmelCase__ : Optional[int] = batch_size
lowerCAmelCase__ : List[Any] = seq_length
lowerCAmelCase__ : Any = is_training
lowerCAmelCase__ : str = use_attention_mask
lowerCAmelCase__ : Union[str, Any] = use_token_type_ids
lowerCAmelCase__ : List[Any] = use_labels
lowerCAmelCase__ : List[str] = vocab_size
lowerCAmelCase__ : Optional[Any] = hidden_size
lowerCAmelCase__ : str = num_hidden_layers
lowerCAmelCase__ : Any = num_attention_heads
lowerCAmelCase__ : List[Any] = intermediate_size
lowerCAmelCase__ : Any = hidden_act
lowerCAmelCase__ : Any = hidden_dropout_prob
lowerCAmelCase__ : int = attention_probs_dropout_prob
lowerCAmelCase__ : Optional[int] = max_position_embeddings
lowerCAmelCase__ : List[str] = type_vocab_size
lowerCAmelCase__ : Union[str, Any] = type_sequence_label_size
lowerCAmelCase__ : List[str] = initializer_range
lowerCAmelCase__ : int = num_choices
def UpperCAmelCase__( self : List[str] )-> Optional[Any]:
lowerCAmelCase__ : Any = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
lowerCAmelCase__ : Union[str, Any] = None
if self.use_attention_mask:
lowerCAmelCase__ : Optional[int] = random_attention_mask([self.batch_size, self.seq_length] )
lowerCAmelCase__ : Any = DistilBertConfig(
vocab_size=self.vocab_size , dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , hidden_dim=self.intermediate_size , hidden_act=self.hidden_act , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , tie_weights_=_SCREAMING_SNAKE_CASE , )
return config, input_ids, attention_mask
def UpperCAmelCase__( self : Dict )-> Union[str, Any]:
lowerCAmelCase__ : List[str] = self.prepare_config_and_inputs()
lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ : Dict = config_and_inputs
lowerCAmelCase__ : Optional[int] = {'''input_ids''': input_ids, '''attention_mask''': attention_mask}
return config, inputs_dict
@require_flax
class _a ( _lowercase , unittest.TestCase):
_a : str = (
(
FlaxDistilBertModel,
FlaxDistilBertForMaskedLM,
FlaxDistilBertForMultipleChoice,
FlaxDistilBertForQuestionAnswering,
FlaxDistilBertForSequenceClassification,
FlaxDistilBertForTokenClassification,
FlaxDistilBertForQuestionAnswering,
)
if is_flax_available()
else ()
)
def UpperCAmelCase__( self : str )-> List[str]:
lowerCAmelCase__ : Tuple = FlaxDistilBertModelTester(self )
@slow
def UpperCAmelCase__( self : Dict )-> Tuple:
for model_class_name in self.all_model_classes:
lowerCAmelCase__ : Tuple = model_class_name.from_pretrained('''distilbert-base-uncased''' )
lowerCAmelCase__ : List[str] = model(np.ones((1, 1) ) )
self.assertIsNotNone(_SCREAMING_SNAKE_CASE )
@require_flax
class _a ( unittest.TestCase):
@slow
def UpperCAmelCase__( self : Optional[int] )-> List[Any]:
lowerCAmelCase__ : int = FlaxDistilBertModel.from_pretrained('''distilbert-base-uncased''' )
lowerCAmelCase__ : Tuple = np.array([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]] )
lowerCAmelCase__ : str = np.array([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] )
lowerCAmelCase__ : str = model(_SCREAMING_SNAKE_CASE , attention_mask=_SCREAMING_SNAKE_CASE )[0]
lowerCAmelCase__ : str = (1, 11, 768)
self.assertEqual(output.shape , _SCREAMING_SNAKE_CASE )
lowerCAmelCase__ : str = np.array([[[-0.1639, 0.3299, 0.1648], [-0.1746, 0.3289, 0.1710], [-0.1884, 0.3357, 0.1810]]] )
self.assertTrue(jnp.allclose(output[:, 1:4, 1:4] , _SCREAMING_SNAKE_CASE , atol=1E-4 ) )
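# Standalone hedged version of the integration check above (requires the model
# download; the expected slice values are the ones asserted in the test).
import numpy as np
from transformers import FlaxDistilBertModel
model = FlaxDistilBertModel.from_pretrained("distilbert-base-uncased")
input_ids = np.array([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]])
attention_mask = np.ones_like(input_ids)
output = model(input_ids, attention_mask=attention_mask)[0]
print(output.shape)         # (1, 11, 768)
print(output[:, 1:4, 1:4])  # compare against the expected_slice in the test above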
| 131 | 0 |
'''simple docstring'''
import os
import posixpath
import shutil
import uuid
from dataclasses import dataclass
from typing import TYPE_CHECKING, Iterable, List, Optional, Tuple, Union
import numpy as np
import pyarrow as pa
import datasets
from datasets.arrow_writer import ArrowWriter, ParquetWriter
from datasets.config import MAX_SHARD_SIZE
from datasets.filesystems import (
is_remote_filesystem,
rename,
)
from datasets.iterable_dataset import _BaseExamplesIterable
from datasets.utils.py_utils import convert_file_size_to_int
_SCREAMING_SNAKE_CASE : List[Any] = datasets.utils.logging.get_logger(__name__)
if TYPE_CHECKING:
import pyspark
@dataclass
class _snake_case ( datasets.BuilderConfig ):
lowerCAmelCase_ : Optional[datasets.Features] = None
def UpperCamelCase_( snake_case : "pyspark.sql.DataFrame" , snake_case : List[int] , ):
'''simple docstring'''
import pyspark
def generate_fn():
snake_case_ = df.select("*" , pyspark.sql.functions.spark_partition_id().alias("part_id" ) )
for partition_id in partition_order:
snake_case_ = df_with_partition_id.select("*" ).where(f'part_id = {partition_id}' ).drop("part_id" )
snake_case_ = partition_df.collect()
snake_case_ = 0
for row in rows:
yield f'{partition_id}_{row_id}', row.asDict()
row_id += 1
return generate_fn
class _snake_case ( _BaseExamplesIterable ):
def __init__( self , a__ , a__=None , ) -> Optional[Any]:
'''simple docstring'''
snake_case_ = df
snake_case_ = partition_order or range(self.df.rdd.getNumPartitions() )
snake_case_ = _generate_iterable_examples(self.df , self.partition_order )
def __iter__( self ) -> int:
'''simple docstring'''
yield from self.generate_examples_fn()
def lowerCAmelCase__ ( self , a__ ) -> "SparkExamplesIterable":
'''simple docstring'''
snake_case_ = list(range(self.df.rdd.getNumPartitions() ) )
generator.shuffle(a__ )
return SparkExamplesIterable(self.df , partition_order=a__ )
def lowerCAmelCase__ ( self , a__ , a__ ) -> "SparkExamplesIterable":
'''simple docstring'''
snake_case_ = self.split_shard_indices_by_worker(a__ , a__ )
return SparkExamplesIterable(self.df , partition_order=a__ )
@property
def lowerCAmelCase__ ( self ) -> int:
'''simple docstring'''
return len(self.partition_order )
class _snake_case ( datasets.DatasetBuilder ):
lowerCAmelCase_ : Optional[Any] = SparkConfig
def __init__( self , a__ , a__ = None , a__ = None , **a__ , ) -> Optional[Any]:
'''simple docstring'''
import pyspark
snake_case_ = pyspark.sql.SparkSession.builder.getOrCreate()
snake_case_ = df
snake_case_ = working_dir
super().__init__(
cache_dir=a__ , config_name=str(self.df.semanticHash() ) , **a__ , )
def lowerCAmelCase__ ( self ) -> Optional[Any]:
'''simple docstring'''
def create_cache_and_write_probe(a__ ):
# makedirs with exist_ok will recursively create the directory. It will not throw an error if directories
# already exist.
os.makedirs(self._cache_dir , exist_ok=a__ )
snake_case_ = os.path.join(self._cache_dir , "fs_test" + uuid.uuid4().hex )
# Opening the file in append mode will create a new file unless it already exists, in which case it will not
# change the file contents.
open(a__ , "a" )
return [probe_file]
if self._spark.conf.get("spark.master" , "" ).startswith("local" ):
return
# If the cluster is multi-node, make sure that the user provided a cache_dir and that it is on an NFS
# accessible to the driver.
# TODO: Stream batches to the driver using ArrowCollectSerializer instead of throwing an error.
if self._cache_dir:
snake_case_ = (
self._spark.sparkContext.parallelize(range(1 ) , 1 ).mapPartitions(a__ ).collect()
)
if os.path.isfile(probe[0] ):
return
raise ValueError(
"When using Dataset.from_spark on a multi-node cluster, the driver and all workers should be able to access cache_dir" )
def lowerCAmelCase__ ( self ) -> Optional[Any]:
'''simple docstring'''
return datasets.DatasetInfo(features=self.config.features )
def lowerCAmelCase__ ( self , a__ ) -> Any:
'''simple docstring'''
return [datasets.SplitGenerator(name=datasets.Split.TRAIN )]
def lowerCAmelCase__ ( self , a__ ) -> Optional[Any]:
'''simple docstring'''
import pyspark
def get_arrow_batch_size(a__ ):
for batch in it:
yield pa.RecordBatch.from_pydict({"batch_bytes": [batch.nbytes]} )
snake_case_ = self.df.count()
snake_case_ = df_num_rows if df_num_rows <= 100 else 100
# Approximate the size of each row (in Arrow format) by averaging over a max-100-row sample.
snake_case_ = (
self.df.limit(a__ )
.repartition(1 )
.mapInArrow(a__ , "batch_bytes: long" )
.agg(pyspark.sql.functions.sum("batch_bytes" ).alias("sample_bytes" ) )
.collect()[0]
.sample_bytes
/ sample_num_rows
)
snake_case_ = approx_bytes_per_row * df_num_rows
if approx_total_size > max_shard_size:
# Make sure there is at least one row per partition.
snake_case_ = min(a__ , int(approx_total_size / max_shard_size ) )
snake_case_ = self.df.repartition(a__ )
def lowerCAmelCase__ ( self , a__ , a__ , a__ , ) -> Iterable[Tuple[int, bool, Union[int, tuple]]]:
'''simple docstring'''
import pyspark
snake_case_ = ParquetWriter if file_format == "parquet" else ArrowWriter
snake_case_ = os.path.join(self._working_dir , os.path.basename(a__ ) ) if self._working_dir else fpath
snake_case_ = file_format == "parquet"
# Define these so that we don't reference self in write_arrow, which will result in a pickling error due to
# pickling the SparkContext.
snake_case_ = self.config.features
snake_case_ = self._writer_batch_size
snake_case_ = self._fs.storage_options
def write_arrow(a__ ):
# Within the same SparkContext, no two task attempts will share the same attempt ID.
snake_case_ = pyspark.TaskContext().taskAttemptId()
snake_case_ = next(a__ , a__ )
if first_batch is None:
# Some partitions might not receive any data.
return pa.RecordBatch.from_arrays(
[[task_id], [0], [0]] , names=["task_id", "num_examples", "num_bytes"] , )
snake_case_ = 0
snake_case_ = writer_class(
features=a__ , path=working_fpath.replace("SSSSS" , F'{shard_id:05d}' ).replace("TTTTT" , F'{task_id:05d}' ) , writer_batch_size=a__ , storage_options=a__ , embed_local_files=a__ , )
snake_case_ = pa.Table.from_batches([first_batch] )
writer.write_table(a__ )
for batch in it:
if max_shard_size is not None and writer._num_bytes >= max_shard_size:
snake_case_ , snake_case_ = writer.finalize()
writer.close()
yield pa.RecordBatch.from_arrays(
[[task_id], [num_examples], [num_bytes]] , names=["task_id", "num_examples", "num_bytes"] , )
shard_id += 1
snake_case_ = writer_class(
features=writer._features , path=working_fpath.replace("SSSSS" , F'{shard_id:05d}' ).replace("TTTTT" , F'{task_id:05d}' ) , writer_batch_size=a__ , storage_options=a__ , embed_local_files=a__ , )
snake_case_ = pa.Table.from_batches([batch] )
writer.write_table(a__ )
if writer._num_bytes > 0:
snake_case_ , snake_case_ = writer.finalize()
writer.close()
yield pa.RecordBatch.from_arrays(
[[task_id], [num_examples], [num_bytes]] , names=["task_id", "num_examples", "num_bytes"] , )
if working_fpath != fpath:
for file in os.listdir(os.path.dirname(a__ ) ):
snake_case_ = os.path.join(os.path.dirname(a__ ) , os.path.basename(a__ ) )
shutil.move(a__ , a__ )
snake_case_ = (
self.df.mapInArrow(a__ , "task_id: long, num_examples: long, num_bytes: long" )
.groupBy("task_id" )
.agg(
pyspark.sql.functions.sum("num_examples" ).alias("total_num_examples" ) , pyspark.sql.functions.sum("num_bytes" ).alias("total_num_bytes" ) , pyspark.sql.functions.count("num_bytes" ).alias("num_shards" ) , pyspark.sql.functions.collect_list("num_examples" ).alias("shard_lengths" ) , )
.collect()
)
for row in stats:
yield row.task_id, (row.total_num_examples, row.total_num_bytes, row.num_shards, row.shard_lengths)
def lowerCAmelCase__ ( self , a__ , a__ = "arrow" , a__ = None , a__ = None , **a__ , ) -> Dict:
'''simple docstring'''
self._validate_cache_dir()
snake_case_ = convert_file_size_to_int(max_shard_size or MAX_SHARD_SIZE )
self._repartition_df_if_needed(a__ )
snake_case_ = not is_remote_filesystem(self._fs )
snake_case_ = os.path.join if is_local else posixpath.join
snake_case_ = "-TTTTT-SSSSS-of-NNNNN"
snake_case_ = F'{self.name}-{split_generator.name}{SUFFIX}.{file_format}'
snake_case_ = path_join(self._output_dir , a__ )
snake_case_ = 0
snake_case_ = 0
snake_case_ = 0
snake_case_ = []
snake_case_ = []
for task_id, content in self._prepare_split_single(a__ , a__ , a__ ):
( num_examples , num_bytes , num_shards , shard_lengths , ) = content
if num_bytes > 0:
total_num_examples += num_examples
total_num_bytes += num_bytes
total_shards += num_shards
task_id_and_num_shards.append((task_id, num_shards) )
all_shard_lengths.extend(a__ )
snake_case_ = total_num_examples
snake_case_ = total_num_bytes
# should rename everything at the end
logger.debug(F'Renaming {total_shards} shards.' )
if total_shards > 1:
snake_case_ = all_shard_lengths
# Define fs outside of _rename_shard so that we don't reference self in the function, which will result in a
# pickling error due to pickling the SparkContext.
snake_case_ = self._fs
# use the -SSSSS-of-NNNNN pattern
def _rename_shard(
a__ , a__ , a__ , ):
rename(
a__ , fpath.replace("SSSSS" , F'{shard_id:05d}' ).replace("TTTTT" , F'{task_id:05d}' ) , fpath.replace("TTTTT-SSSSS" , F'{global_shard_id:05d}' ).replace("NNNNN" , F'{total_shards:05d}' ) , )
snake_case_ = []
snake_case_ = 0
for i in range(len(a__ ) ):
snake_case_ , snake_case_ = task_id_and_num_shards[i]
for shard_id in range(a__ ):
args.append([task_id, shard_id, global_shard_id] )
global_shard_id += 1
self._spark.sparkContext.parallelize(a__ , len(a__ ) ).map(lambda a__ : _rename_shard(*a__ ) ).collect()
else:
# don't use any pattern
snake_case_ = 0
snake_case_ = task_id_and_num_shards[0][0]
self._rename(
fpath.replace("SSSSS" , F'{shard_id:05d}' ).replace("TTTTT" , F'{task_id:05d}' ) , fpath.replace(a__ , "" ) , )
def lowerCAmelCase__ ( self , a__ , ) -> SparkExamplesIterable:
'''simple docstring'''
return SparkExamplesIterable(self.df )
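# Hedged usage sketch of the builder above through its public entry point
# (the local Spark session and the toy schema are assumptions).
from pyspark.sql import SparkSession
from datasets import Dataset
spark = SparkSession.builder.master("local[*]").getOrCreate()
df = spark.createDataFrame([(1, "a"), (2, "b")], schema=["id", "text"])
ds = Dataset.from_spark(df)
print(ds[0])  # {'id': 1, 'text': 'a'}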
| 92 |
'''simple docstring'''
from __future__ import annotations
import numpy as np
def lower_upper_decomposition( table : np.ndarray ):
    """simple docstring"""
    rows , columns = np.shape(table )
    if rows != columns:
        msg = (
            "'table' has to be of square shaped array but got a "
            f'{rows}x{columns} array:\n{table}'
        )
        raise ValueError(msg )
    lower = np.zeros((rows, columns) )
    upper = np.zeros((rows, columns) )
    for i in range(columns ):
        for j in range(i ):
            total = sum(lower[i][k] * upper[k][j] for k in range(j ) )
            if upper[j][j] == 0:
                raise ArithmeticError("No LU decomposition exists" )
            lower[i][j] = (table[i][j] - total) / upper[j][j]
        lower[i][i] = 1
        for j in range(i , columns ):
            total = sum(lower[i][k] * upper[k][j] for k in range(i ) )
            upper[i][j] = table[i][j] - total
    return lower, upper
if __name__ == "__main__":
import doctest
doctest.testmod()
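# Quick numerical check (not in the source): for a matrix with nonzero leading
# principal minors, the two factors should reconstruct the input.
_example = np.array([[2.0, -2.0, 1.0], [0.0, 1.0, 2.0], [5.0, 3.0, 1.0]])
_lower, _upper = lower_upper_decomposition(_example)
assert np.allclose(_lower @ _upper, _example)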
| 92 | 1 |
import json
import os
from functools import lru_cache
from typing import List, Optional, Tuple
import regex as re
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
lowerCamelCase__ : Dict = logging.get_logger(__name__)
lowerCamelCase__ : Union[str, Any] = {'vocab_file': 'vocab.json', 'merges_file': 'merges.txt'}
lowerCamelCase__ : List[str] = {
'vocab_file': {
'allenai/longformer-base-4096': 'https://huggingface.co/allenai/longformer-base-4096/resolve/main/vocab.json',
'allenai/longformer-large-4096': (
'https://huggingface.co/allenai/longformer-large-4096/resolve/main/vocab.json'
),
'allenai/longformer-large-4096-finetuned-triviaqa': (
'https://huggingface.co/allenai/longformer-large-4096-finetuned-triviaqa/resolve/main/vocab.json'
),
'allenai/longformer-base-4096-extra.pos.embd.only': (
'https://huggingface.co/allenai/longformer-base-4096-extra.pos.embd.only/resolve/main/vocab.json'
),
'allenai/longformer-large-4096-extra.pos.embd.only': (
'https://huggingface.co/allenai/longformer-large-4096-extra.pos.embd.only/resolve/main/vocab.json'
),
},
'merges_file': {
'allenai/longformer-base-4096': 'https://huggingface.co/allenai/longformer-base-4096/resolve/main/merges.txt',
'allenai/longformer-large-4096': (
'https://huggingface.co/allenai/longformer-large-4096/resolve/main/merges.txt'
),
'allenai/longformer-large-4096-finetuned-triviaqa': (
'https://huggingface.co/allenai/longformer-large-4096-finetuned-triviaqa/resolve/main/merges.txt'
),
'allenai/longformer-base-4096-extra.pos.embd.only': (
'https://huggingface.co/allenai/longformer-base-4096-extra.pos.embd.only/resolve/main/merges.txt'
),
'allenai/longformer-large-4096-extra.pos.embd.only': (
'https://huggingface.co/allenai/longformer-large-4096-extra.pos.embd.only/resolve/main/merges.txt'
),
},
}
lowerCamelCase__ : str = {
'allenai/longformer-base-4096': 4_096,
'allenai/longformer-large-4096': 4_096,
'allenai/longformer-large-4096-finetuned-triviaqa': 4_096,
'allenai/longformer-base-4096-extra.pos.embd.only': 4_096,
'allenai/longformer-large-4096-extra.pos.embd.only': 4_096,
}
@lru_cache()
# Copied from transformers.models.roberta.tokenization_roberta.bytes_to_unicode
def UpperCAmelCase_ ( ) -> List[Any]:
SCREAMING_SNAKE_CASE_ = (
list(range(ord('!' ) , ord('~' ) + 1 ) ) + list(range(ord('¡' ) , ord('¬' ) + 1 ) ) + list(range(ord('®' ) , ord('ÿ' ) + 1 ) )
)
SCREAMING_SNAKE_CASE_ = bs[:]
SCREAMING_SNAKE_CASE_ = 0
for b in range(2**8 ):
if b not in bs:
bs.append(__UpperCAmelCase )
cs.append(2**8 + n )
n += 1
SCREAMING_SNAKE_CASE_ = [chr(__UpperCAmelCase ) for n in cs]
return dict(zip(__UpperCAmelCase , __UpperCAmelCase ) )
def UpperCAmelCase_ ( __UpperCAmelCase : str ) -> str:
SCREAMING_SNAKE_CASE_ = set()
SCREAMING_SNAKE_CASE_ = word[0]
for char in word[1:]:
pairs.add((prev_char, char) )
SCREAMING_SNAKE_CASE_ = char
return pairs
class lowerCamelCase_ ( _SCREAMING_SNAKE_CASE ):
'''simple docstring'''
lowercase_ = VOCAB_FILES_NAMES
lowercase_ = PRETRAINED_VOCAB_FILES_MAP
lowercase_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowercase_ = ["input_ids", "attention_mask"]
def __init__( self : Tuple , _lowerCAmelCase : Union[str, Any] , _lowerCAmelCase : Tuple , _lowerCAmelCase : List[str]="replace" , _lowerCAmelCase : List[Any]="<s>" , _lowerCAmelCase : Union[str, Any]="</s>" , _lowerCAmelCase : List[str]="</s>" , _lowerCAmelCase : Any="<s>" , _lowerCAmelCase : List[Any]="<unk>" , _lowerCAmelCase : Union[str, Any]="<pad>" , _lowerCAmelCase : Union[str, Any]="<mask>" , _lowerCAmelCase : Dict=False , **_lowerCAmelCase : Tuple , ):
SCREAMING_SNAKE_CASE_ = AddedToken(_lowerCAmelCase , lstrip=_lowerCAmelCase , rstrip=_lowerCAmelCase ) if isinstance(_lowerCAmelCase , _lowerCAmelCase ) else bos_token
SCREAMING_SNAKE_CASE_ = AddedToken(_lowerCAmelCase , lstrip=_lowerCAmelCase , rstrip=_lowerCAmelCase ) if isinstance(_lowerCAmelCase , _lowerCAmelCase ) else eos_token
SCREAMING_SNAKE_CASE_ = AddedToken(_lowerCAmelCase , lstrip=_lowerCAmelCase , rstrip=_lowerCAmelCase ) if isinstance(_lowerCAmelCase , _lowerCAmelCase ) else sep_token
SCREAMING_SNAKE_CASE_ = AddedToken(_lowerCAmelCase , lstrip=_lowerCAmelCase , rstrip=_lowerCAmelCase ) if isinstance(_lowerCAmelCase , _lowerCAmelCase ) else cls_token
SCREAMING_SNAKE_CASE_ = AddedToken(_lowerCAmelCase , lstrip=_lowerCAmelCase , rstrip=_lowerCAmelCase ) if isinstance(_lowerCAmelCase , _lowerCAmelCase ) else unk_token
SCREAMING_SNAKE_CASE_ = AddedToken(_lowerCAmelCase , lstrip=_lowerCAmelCase , rstrip=_lowerCAmelCase ) if isinstance(_lowerCAmelCase , _lowerCAmelCase ) else pad_token
# Mask token behave like a normal word, i.e. include the space before it
SCREAMING_SNAKE_CASE_ = AddedToken(_lowerCAmelCase , lstrip=_lowerCAmelCase , rstrip=_lowerCAmelCase ) if isinstance(_lowerCAmelCase , _lowerCAmelCase ) else mask_token
super().__init__(
errors=_lowerCAmelCase , bos_token=_lowerCAmelCase , eos_token=_lowerCAmelCase , unk_token=_lowerCAmelCase , sep_token=_lowerCAmelCase , cls_token=_lowerCAmelCase , pad_token=_lowerCAmelCase , mask_token=_lowerCAmelCase , add_prefix_space=_lowerCAmelCase , **_lowerCAmelCase , )
with open(_lowerCAmelCase , encoding='utf-8' ) as vocab_handle:
SCREAMING_SNAKE_CASE_ = json.load(_lowerCAmelCase )
SCREAMING_SNAKE_CASE_ = {v: k for k, v in self.encoder.items()}
SCREAMING_SNAKE_CASE_ = errors # how to handle errors in decoding
SCREAMING_SNAKE_CASE_ = bytes_to_unicode()
SCREAMING_SNAKE_CASE_ = {v: k for k, v in self.byte_encoder.items()}
with open(_lowerCAmelCase , encoding='utf-8' ) as merges_handle:
SCREAMING_SNAKE_CASE_ = merges_handle.read().split('\n' )[1:-1]
SCREAMING_SNAKE_CASE_ = [tuple(merge.split() ) for merge in bpe_merges]
SCREAMING_SNAKE_CASE_ = dict(zip(_lowerCAmelCase , range(len(_lowerCAmelCase ) ) ) )
SCREAMING_SNAKE_CASE_ = {}
SCREAMING_SNAKE_CASE_ = add_prefix_space
# Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
SCREAMING_SNAKE_CASE_ = re.compile(R'\'s|\'t|\'re|\'ve|\'m|\'ll|\'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+' )
@property
def lowerCAmelCase_ ( self : Optional[Any] ):
return len(self.encoder )
def lowerCAmelCase_ ( self : Tuple ):
return dict(self.encoder , **self.added_tokens_encoder )
def lowerCAmelCase_ ( self : str , _lowerCAmelCase : int ):
if token in self.cache:
return self.cache[token]
SCREAMING_SNAKE_CASE_ = tuple(_lowerCAmelCase )
SCREAMING_SNAKE_CASE_ = get_pairs(_lowerCAmelCase )
if not pairs:
return token
while True:
SCREAMING_SNAKE_CASE_ = min(_lowerCAmelCase , key=lambda _lowerCAmelCase : self.bpe_ranks.get(_lowerCAmelCase , float('inf' ) ) )
if bigram not in self.bpe_ranks:
break
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = bigram
SCREAMING_SNAKE_CASE_ = []
SCREAMING_SNAKE_CASE_ = 0
while i < len(_lowerCAmelCase ):
try:
SCREAMING_SNAKE_CASE_ = word.index(_lowerCAmelCase , _lowerCAmelCase )
except ValueError:
new_word.extend(word[i:] )
break
else:
new_word.extend(word[i:j] )
SCREAMING_SNAKE_CASE_ = j
if word[i] == first and i < len(_lowerCAmelCase ) - 1 and word[i + 1] == second:
new_word.append(first + second )
i += 2
else:
new_word.append(word[i] )
i += 1
SCREAMING_SNAKE_CASE_ = tuple(_lowerCAmelCase )
SCREAMING_SNAKE_CASE_ = new_word
if len(_lowerCAmelCase ) == 1:
break
else:
SCREAMING_SNAKE_CASE_ = get_pairs(_lowerCAmelCase )
SCREAMING_SNAKE_CASE_ = ' '.join(_lowerCAmelCase )
SCREAMING_SNAKE_CASE_ = word
return word
def lowerCAmelCase_ ( self : Optional[Any] , _lowerCAmelCase : Dict ):
SCREAMING_SNAKE_CASE_ = []
for token in re.findall(self.pat , _lowerCAmelCase ):
SCREAMING_SNAKE_CASE_ = ''.join(
self.byte_encoder[b] for b in token.encode('utf-8' ) ) # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
bpe_tokens.extend(bpe_token for bpe_token in self.bpe(_lowerCAmelCase ).split(' ' ) )
return bpe_tokens
def lowerCAmelCase_ ( self : List[Any] , _lowerCAmelCase : int ):
return self.encoder.get(_lowerCAmelCase , self.encoder.get(self.unk_token ) )
def lowerCAmelCase_ ( self : Dict , _lowerCAmelCase : Optional[Any] ):
return self.decoder.get(_lowerCAmelCase )
def lowerCAmelCase_ ( self : Optional[Any] , _lowerCAmelCase : Optional[Any] ):
SCREAMING_SNAKE_CASE_ = ''.join(_lowerCAmelCase )
SCREAMING_SNAKE_CASE_ = bytearray([self.byte_decoder[c] for c in text] ).decode('utf-8' , errors=self.errors )
return text
def lowerCAmelCase_ ( self : Union[str, Any] , _lowerCAmelCase : str , _lowerCAmelCase : Optional[str] = None ):
if not os.path.isdir(_lowerCAmelCase ):
logger.error(F"Vocabulary path ({save_directory}) should be a directory" )
return
SCREAMING_SNAKE_CASE_ = os.path.join(
_lowerCAmelCase , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
SCREAMING_SNAKE_CASE_ = os.path.join(
_lowerCAmelCase , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['merges_file'] )
with open(_lowerCAmelCase , 'w' , encoding='utf-8' ) as f:
f.write(json.dumps(self.encoder , indent=2 , sort_keys=_lowerCAmelCase , ensure_ascii=_lowerCAmelCase ) + '\n' )
SCREAMING_SNAKE_CASE_ = 0
with open(_lowerCAmelCase , 'w' , encoding='utf-8' ) as writer:
writer.write('#version: 0.2\n' )
for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda kv : kv[1] ):
if index != token_index:
logger.warning(
F"Saving vocabulary to {merge_file}: BPE merge indices are not consecutive."
' Please check that the tokenizer is not corrupted!' )
SCREAMING_SNAKE_CASE_ = token_index
writer.write(' '.join(_lowerCAmelCase ) + '\n' )
index += 1
return vocab_file, merge_file
def lowerCAmelCase_ ( self : Any , token_ids_0 : List[int] , token_ids_1 : Optional[List[int]] = None ):
    if token_ids_1 is None:
        return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
    cls = [self.cls_token_id]
    sep = [self.sep_token_id]
    return cls + token_ids_0 + sep + sep + token_ids_1 + sep
def lowerCAmelCase_ ( self : Any , token_ids_0 : List[int] , token_ids_1 : Optional[List[int]] = None , already_has_special_tokens : bool = False ):
    if already_has_special_tokens:
        return super().get_special_tokens_mask(
            token_ids_0=token_ids_0 , token_ids_1=token_ids_1 , already_has_special_tokens=already_has_special_tokens )
    if token_ids_1 is None:
        return [1] + ([0] * len(token_ids_0)) + [1]
    return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1)) + [1]
def lowerCAmelCase_ ( self : Dict , token_ids_0 : List[int] , token_ids_1 : Optional[List[int]] = None ):
    sep = [self.sep_token_id]
    cls = [self.cls_token_id]
    if token_ids_1 is None:
        return len(cls + token_ids_0 + sep ) * [0]
    return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep ) * [0]
def lowerCAmelCase_ ( self : Union[str, Any] , _lowerCAmelCase : Optional[int] , _lowerCAmelCase : Union[str, Any]=False , **_lowerCAmelCase : List[str] ):
SCREAMING_SNAKE_CASE_ = kwargs.pop('add_prefix_space' , self.add_prefix_space )
if (is_split_into_words or add_prefix_space) and (len(_lowerCAmelCase ) > 0 and not text[0].isspace()):
SCREAMING_SNAKE_CASE_ = ' ' + text
return (text, kwargs)
| 225 |
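# Toy illustration (assumed inputs) of the merge loop in bpe() above: the
# lowest-ranked pair in the merge table wins each round, e.g. ("l", "o") -> "lo".
toy_ranks = {("l", "o"): 0, ("lo", "w"): 1}
word = ("l", "o", "w")
pairs = {(word[i], word[i + 1]) for i in range(len(word) - 1)}
best = min(pairs, key=lambda pair: toy_ranks.get(pair, float("inf")))
print(best)  # ('l', 'o'), so the word becomes ('lo', 'w'), then ('low',)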
import unittest
import numpy as np
from diffusers import OnnxStableDiffusionInpaintPipelineLegacy
from diffusers.utils.testing_utils import (
is_onnx_available,
load_image,
load_numpy,
nightly,
require_onnxruntime,
require_torch_gpu,
)
if is_onnx_available():
import onnxruntime as ort
@nightly
@require_onnxruntime
@require_torch_gpu
class lowerCamelCase_ ( unittest.TestCase ):
'''simple docstring'''
@property
def lowerCAmelCase_ ( self : Optional[Any] ):
return (
"CUDAExecutionProvider",
{
"gpu_mem_limit": "15000000000", # 15GB
"arena_extend_strategy": "kSameAsRequested",
},
)
@property
def lowerCAmelCase_ ( self : str ):
SCREAMING_SNAKE_CASE_ = ort.SessionOptions()
SCREAMING_SNAKE_CASE_ = False
return options
def lowerCAmelCase_ ( self : Tuple ):
        init_image = load_image(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
            '/in_paint/overture-creations-5sI6fQgYIuo.png' )
        mask_image = load_image(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
            '/in_paint/overture-creations-5sI6fQgYIuo_mask.png' )
        expected_image = load_numpy(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
            '/in_paint/red_cat_sitting_on_a_park_bench_onnx.npy' )
        # using the PNDM scheduler by default
        pipe = OnnxStableDiffusionInpaintPipelineLegacy.from_pretrained(
            'CompVis/stable-diffusion-v1-4' , revision='onnx' , safety_checker=None , feature_extractor=None , provider=self.gpu_provider , sess_options=self.gpu_options , )
        pipe.set_progress_bar_config(disable=None )
        prompt = 'A red cat sitting on a park bench'
        generator = np.random.RandomState(0 )
        output = pipe(
            prompt=prompt , image=init_image , mask_image=mask_image , strength=0.75 , guidance_scale=7.5 , num_inference_steps=15 , generator=generator , output_type='np' , )
        image = output.images[0]
assert image.shape == (512, 512, 3)
assert np.abs(expected_image - image ).max() < 1E-2 | 225 | 1 |
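# Note on the seeding above: the ONNX stable-diffusion pipelines in diffusers
# take a numpy np.random.RandomState rather than a torch.Generator, which is
# why the test builds RandomState(0) and compares against a saved .npy image.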
'''simple docstring'''
from math import ceil
def solution( n : int = 1001 ):
    """simple docstring"""
    total = 1
    for i in range(1 , int(ceil(n / 2.0 ) ) ):
        odd = 2 * i + 1
        even = 2 * i
        total = total + 4 * odd**2 - 6 * even
    return total
if __name__ == "__main__":
    import sys
    if len(sys.argv) == 1:
        print(solution())
    else:
        try:
            n = int(sys.argv[1])
            print(solution(n))
        except ValueError:
            print('''Invalid entry - please enter a number''')
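# Sanity check for the ring formula above (Project Euler problem 28): ring i of
# the spiral has corners summing to 4*(2i+1)**2 - 12*i, so solution(5) is
# 1 + 24 + 76 == 101, matching the 5x5 diagonals 1,3,5,7,9,13,17,21,25.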
| 274 | '''simple docstring'''
import os
import time
import numpy as np
import onnxruntime as ort
snake_case__ : Optional[int] = '''1'''
snake_case__ : str = '''0'''
snake_case__ : List[str] = '''1'''
sess_opt = ort.SessionOptions()
sess_opt.graph_optimization_level = ort.GraphOptimizationLevel.ORT_DISABLE_ALL
print('''Create inference session...''')
execution_provider = ['''TensorrtExecutionProvider''', '''CUDAExecutionProvider''']
sess = ort.InferenceSession('''model.onnx''', sess_options=sess_opt, providers=execution_provider)
run_opt = ort.RunOptions()
sequence = 128
batch = 1
input_ids = np.ones((batch, sequence), dtype=np.int64)
attention_mask = np.ones((batch, sequence), dtype=np.int64)
token_type_ids = np.ones((batch, sequence), dtype=np.int64)
print('''Warm up phase...''')
sess.run(
None,
{
sess.get_inputs()[0].name: input_ids,
sess.get_inputs()[1].name: attention_mask,
sess.get_inputs()[2].name: token_type_ids,
},
run_options=run_opt,
)
print('''Start inference...''')
start_time = time.time()
max_iters = 2000
snake_case__ : Tuple = {}
for iter in range(max_iters):
snake_case__ : str = sess.run(
None,
{
sess.get_inputs()[0].name: input_ids,
sess.get_inputs()[1].name: attention_mask,
sess.get_inputs()[2].name: token_type_ids,
},
run_options=run_opt,
)
print('''Average Inference Time = {:.3f} ms'''.format((time.time() - start_time) * 1000 / max_iters))
| 274 | 1 |
import os
import re
import shutil
import sys
import tempfile
import unittest
import black
A__ = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, """utils"""))
import check_copies # noqa: E402
# This is the reference code that will be used in the tests.
# If BertLMPredictionHead is changed in modeling_bert.py, this code needs to be manually updated.
A__ = """ def __init__(self, config):
super().__init__()
self.transform = BertPredictionHeadTransform(config)
# The output weights are the same as the input embeddings, but there is
# an output-only bias for each token.
self.decoder = nn.Linear(config.hidden_size, config.vocab_size, bias=False)
self.bias = nn.Parameter(torch.zeros(config.vocab_size))
# Need a link between the two variables so that the bias is correctly resized with `resize_token_embeddings`
self.decoder.bias = self.bias
def forward(self, hidden_states):
hidden_states = self.transform(hidden_states)
hidden_states = self.decoder(hidden_states)
return hidden_states
"""
class __lowerCAmelCase ( unittest.TestCase ):
def snake_case ( self ):
"""simple docstring"""
_lowerCAmelCase = tempfile.mkdtemp()
os.makedirs(os.path.join(self.transformer_dir , """models/bert/""" ) )
_lowerCAmelCase = self.transformer_dir
shutil.copy(
os.path.join(_snake_case , """src/transformers/models/bert/modeling_bert.py""" ) , os.path.join(self.transformer_dir , """models/bert/modeling_bert.py""" ) , )
def snake_case ( self ):
"""simple docstring"""
_lowerCAmelCase = """src/transformers"""
shutil.rmtree(self.transformer_dir )
def snake_case ( self , _snake_case , _snake_case , _snake_case , _snake_case=None ):
"""simple docstring"""
_lowerCAmelCase = comment + F'\nclass {class_name}(nn.Module):\n' + class_code
if overwrite_result is not None:
_lowerCAmelCase = comment + F'\nclass {class_name}(nn.Module):\n' + overwrite_result
        _lowerCAmelCase = black.Mode(target_versions={black.TargetVersion.PY35} , line_length=119 )
_lowerCAmelCase = black.format_str(_snake_case , mode=_snake_case )
_lowerCAmelCase = os.path.join(self.transformer_dir , """new_code.py""" )
with open(_snake_case , """w""" , newline="""\n""" ) as f:
f.write(_snake_case )
if overwrite_result is None:
self.assertTrue(len(check_copies.is_copy_consistent(_snake_case ) ) == 0 )
else:
check_copies.is_copy_consistent(f.name , overwrite=_snake_case )
with open(_snake_case , """r""" ) as f:
self.assertTrue(f.read() , _snake_case )
def snake_case ( self ):
"""simple docstring"""
_lowerCAmelCase = check_copies.find_code_in_transformers("""models.bert.modeling_bert.BertLMPredictionHead""" )
self.assertEqual(_snake_case , _snake_case )
def snake_case ( self ):
"""simple docstring"""
self.check_copy_consistency(
"""# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead""" , """BertLMPredictionHead""" , REFERENCE_CODE + """\n""" , )
# With no empty line at the end
self.check_copy_consistency(
"""# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead""" , """BertLMPredictionHead""" , _snake_case , )
# Copy consistency with rename
self.check_copy_consistency(
"""# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead with Bert->TestModel""" , """TestModelLMPredictionHead""" , re.sub("""Bert""" , """TestModel""" , _snake_case ) , )
# Copy consistency with a really long name
_lowerCAmelCase = """TestModelWithAReallyLongNameBecauseSomePeopleLikeThatForSomeReason"""
self.check_copy_consistency(
F'# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead with Bert->{long_class_name}' , F'{long_class_name}LMPredictionHead' , re.sub("""Bert""" , _snake_case , _snake_case ) , )
# Copy consistency with overwrite
self.check_copy_consistency(
"""# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead with Bert->TestModel""" , """TestModelLMPredictionHead""" , _snake_case , overwrite_result=re.sub("""Bert""" , """TestModel""" , _snake_case ) , )
def snake_case ( self ):
"""simple docstring"""
_lowerCAmelCase = check_copies.LOCALIZED_READMES["""README_zh-hans.md"""]
_lowerCAmelCase = (
"""1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (from Google Research and the"""
""" Toyota Technological Institute at Chicago) released with the paper [ALBERT: A Lite BERT for"""
""" Self-supervised Learning of Language Representations](https://arxiv.org/abs/1909.11942), by Zhenzhong"""
""" Lan, Mingda Chen, Sebastian Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut.\n1."""
""" **[DistilBERT](https://huggingface.co/transformers/model_doc/distilbert.html)** (from HuggingFace),"""
""" released together with the paper [DistilBERT, a distilled version of BERT: smaller, faster, cheaper and"""
""" lighter](https://arxiv.org/abs/1910.01108) by Victor Sanh, Lysandre Debut and Thomas Wolf. The same"""
""" method has been applied to compress GPT2 into"""
""" [DistilGPT2](https://github.com/huggingface/transformers/tree/main/examples/distillation), RoBERTa into"""
""" [DistilRoBERTa](https://github.com/huggingface/transformers/tree/main/examples/distillation),"""
""" Multilingual BERT into"""
""" [DistilmBERT](https://github.com/huggingface/transformers/tree/main/examples/distillation) and a German"""
""" version of DistilBERT.\n1. **[ELECTRA](https://huggingface.co/transformers/model_doc/electra.html)**"""
""" (from Google Research/Stanford University) released with the paper [ELECTRA: Pre-training text encoders"""
""" as discriminators rather than generators](https://arxiv.org/abs/2003.10555) by Kevin Clark, Minh-Thang"""
""" Luong, Quoc V. Le, Christopher D. Manning."""
)
_lowerCAmelCase = (
"""1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (来自 Google Research and the"""
""" Toyota Technological Institute at Chicago) 伴随论文 [ALBERT: A Lite BERT for Self-supervised Learning of"""
""" Language Representations](https://arxiv.org/abs/1909.11942), 由 Zhenzhong Lan, Mingda Chen, Sebastian"""
""" Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut 发布。\n"""
)
_lowerCAmelCase = (
"""1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (来自 Google Research and the"""
""" Toyota Technological Institute at Chicago) 伴随论文 [ALBERT: A Lite BERT for Self-supervised Learning of"""
""" Language Representations](https://arxiv.org/abs/1909.11942), 由 Zhenzhong Lan, Mingda Chen, Sebastian"""
""" Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut 发布。\n1."""
""" **[DistilBERT](https://huggingface.co/transformers/model_doc/distilbert.html)** (来自 HuggingFace) 伴随论文"""
""" [DistilBERT, a distilled version of BERT: smaller, faster, cheaper and"""
""" lighter](https://arxiv.org/abs/1910.01108) 由 Victor Sanh, Lysandre Debut and Thomas Wolf 发布。 The same"""
""" method has been applied to compress GPT2 into"""
""" [DistilGPT2](https://github.com/huggingface/transformers/tree/main/examples/distillation), RoBERTa into"""
""" [DistilRoBERTa](https://github.com/huggingface/transformers/tree/main/examples/distillation),"""
""" Multilingual BERT into"""
""" [DistilmBERT](https://github.com/huggingface/transformers/tree/main/examples/distillation) and a German"""
""" version of DistilBERT.\n1. **[ELECTRA](https://huggingface.co/transformers/model_doc/electra.html)** (来自"""
""" Google Research/Stanford University) 伴随论文 [ELECTRA: Pre-training text encoders as discriminators rather"""
""" than generators](https://arxiv.org/abs/2003.10555) 由 Kevin Clark, Minh-Thang Luong, Quoc V. Le,"""
""" Christopher D. Manning 发布。\n"""
)
_lowerCAmelCase , _lowerCAmelCase = check_copies.convert_to_localized_md(
_snake_case , _snake_case , localized_readme["""format_model_list"""] )
self.assertFalse(_snake_case )
self.assertEqual(_snake_case , _snake_case )
_lowerCAmelCase , _lowerCAmelCase = check_copies.convert_to_localized_md(
_snake_case , _snake_case , localized_readme["""format_model_list"""] )
# Check whether the number of models is equal to README.md after conversion.
self.assertTrue(_snake_case )
_lowerCAmelCase = (
"""1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (from Google Research and the"""
""" Toyota Technological Institute at Chicago) released with the paper [ALBERT: A Lite BERT for"""
""" Self-supervised Learning of Language Representations](https://arxiv.org/abs/1909.11942), by Zhenzhong"""
""" Lan, Mingda Chen, Sebastian Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut."""
)
_lowerCAmelCase = (
"""1. **[ALBERT](https://huggingface.co/transformers/main/model_doc/albert.html)** (来自 Google Research and"""
""" the Toyota Technological Institute at Chicago) 伴随论文 [ALBERT: A Lite BERT for Self-supervised Learning of"""
""" Language Representations](https://arxiv.org/abs/1909.11942), 由 Zhenzhong Lan, Mingda Chen, Sebastian"""
""" Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut 发布。\n"""
)
_lowerCAmelCase = (
"""1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (来自 Google Research and the"""
""" Toyota Technological Institute at Chicago) 伴随论文 [ALBERT: A Lite BERT for Self-supervised Learning of"""
""" Language Representations](https://arxiv.org/abs/1909.11942), 由 Zhenzhong Lan, Mingda Chen, Sebastian"""
""" Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut 发布。\n"""
)
_lowerCAmelCase , _lowerCAmelCase = check_copies.convert_to_localized_md(
_snake_case , _snake_case , localized_readme["""format_model_list"""] )
# Check if the model link is synchronized.
self.assertEqual(_snake_case , _snake_case )
| 82 |
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from argparse import ArgumentParser
from accelerate.commands.config import get_config_parser
from accelerate.commands.env import env_command_parser
from accelerate.commands.launch import launch_command_parser
from accelerate.commands.test import test_command_parser
from accelerate.commands.tpu import tpu_command_parser
def main():
    '''simple docstring'''
    parser = ArgumentParser("""Accelerate CLI tool""" , usage="""accelerate <command> [<args>]""" , allow_abbrev=False)
    subparsers = parser.add_subparsers(help="""accelerate command helpers""")
    # Register commands
    get_config_parser(subparsers=subparsers)
    env_command_parser(subparsers=subparsers)
    launch_command_parser(subparsers=subparsers)
    tpu_command_parser(subparsers=subparsers)
    test_command_parser(subparsers=subparsers)
    # Let's go
    args = parser.parse_args()
    if not hasattr(args , """func"""):
        parser.print_help()
        exit(1)
    # Run
    args.func(args)
if __name__ == "__main__":
    main()
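# Typical shell usage once installed (these sub-commands come straight from the
# parsers registered above; `train.py` and its flag are placeholder arguments):
#   accelerate config            # interactive configuration
#   accelerate env               # report the runtime environment
#   accelerate test              # sanity-check the saved config
#   accelerate launch train.py --my-arg ...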
| 248 | 0 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCamelCase : List[Any] = logging.get_logger(__name__)
UpperCamelCase : List[Any] = {
"SCUT-DLVCLab/lilt-roberta-en-base": (
"https://huggingface.co/SCUT-DLVCLab/lilt-roberta-en-base/resolve/main/config.json"
),
}
class LiltConfig( PretrainedConfig ):
    model_type = "lilt"
    def __init__( self , vocab_size=3_0522 , hidden_size=768 , num_hidden_layers=12 , num_attention_heads=12 , intermediate_size=3072 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=512 , type_vocab_size=2 , initializer_range=0.0_2 , layer_norm_eps=1E-12 , pad_token_id=0 , position_embedding_type="absolute" , classifier_dropout=None , channel_shrink_ratio=4 , max_2d_position_embeddings=1024 , **kwargs , ):
        '''simple docstring'''
        super().__init__(pad_token_id=pad_token_id , **kwargs )
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.classifier_dropout = classifier_dropout
        self.channel_shrink_ratio = channel_shrink_ratio
        self.max_2d_position_embeddings = max_2d_position_embeddings
| 351 |
"""simple docstring"""
from math import isqrt
def calculate_prime_numbers( max_number :int ) -> list[int]:
    is_prime = [True] * max_number
    for i in range(2 , isqrt(max_number - 1 ) + 1 ):
        if is_prime[i]:
            for j in range(i**2 , max_number , i ):
                is_prime[j] = False
    return [i for i in range(2 , max_number ) if is_prime[i]]
def solution( max_number :int = 1_0**8 ) -> int:
    prime_numbers = calculate_prime_numbers(max_number // 2 )
    semiprimes_count = 0
    left = 0
    right = len(prime_numbers ) - 1
    while left <= right:
        while prime_numbers[left] * prime_numbers[right] >= max_number:
            right -= 1
        semiprimes_count += right - left + 1
        left += 1
    return semiprimes_count
if __name__ == "__main__":
print(f'''{solution() = }''')
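# How the two-pointer count above works (this is Project Euler 187): the primes
# are sorted, so as the left prime grows, the largest right index with
# primes[left] * primes[right] < max_number only ever moves leftward; every j
# in [left, right] then yields a distinct semiprime primes[left] * primes[j]
# below the bound, giving a linear pass over the sieved primes.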
| 263 | 0 |
'''simple docstring'''
def fibonacci( n : int ) -> int:
    if n == 1 or not isinstance(n , int ):
        return 0
    elif n == 2:
        return 1
    else:
        sequence = [0, 1]
        for i in range(2 , n + 1 ):
            sequence.append(sequence[i - 1] + sequence[i - 2] )
        return sequence[n]
def fibonacci_digits_index( n : int ) -> int:
    digits = 0
    index = 2
    while digits < n:
        index += 1
        digits = len(str(fibonacci(index ) ) )
    return index
def solution( n : int = 1_000 ) -> int:
    return fibonacci_digits_index(n )
if __name__ == "__main__":
print(solution(int(str(input()).strip())))
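# A minimal incremental variant (hypothetical helper, not part of the row
# above): the loop in fibonacci_digits_index recomputes the whole Fibonacci
# sequence on every iteration; carrying the last two values makes each step a
# single big-int addition instead.
def fib_digits_index_fast(n: int) -> int:
    a, b, index = 1, 1, 2  # fib(1) = fib(2) = 1 in the indexing used above
    while len(str(b)) < n:
        a, b = b, a + b
        index += 1
    return index  # may differ from the loop above only at the n == 1 edge case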
| 35 |
import pickle
import numpy as np
from matplotlib import pyplot as plt
class _snake_case :
'''simple docstring'''
def __init__( self: Any ,lowerCamelCase_: Dict ,lowerCamelCase_: Tuple ,lowerCamelCase_: Dict ,lowerCamelCase_: Tuple ,lowerCamelCase_: Any ,lowerCamelCase_: Tuple=0.2 ,lowerCamelCase_: Union[str, Any]=0.2 ) -> List[str]:
UpperCAmelCase_ : List[Any] = bp_numa
UpperCAmelCase_ : str = bp_numa
UpperCAmelCase_ : List[Any] = bp_numa
UpperCAmelCase_ : Optional[int] = conva_get[:2]
UpperCAmelCase_ : List[Any] = conva_get[2]
UpperCAmelCase_ : str = size_pa
UpperCAmelCase_ : Optional[int] = rate_w
UpperCAmelCase_ : Dict = rate_t
UpperCAmelCase_ : List[Any] = [
np.mat(-1 * np.random.rand(self.conva[0] ,self.conva[0] ) + 0.5 )
for i in range(self.conva[1] )
]
UpperCAmelCase_ : int = np.mat(-1 * np.random.rand(self.num_bpa ,self.num_bpa ) + 0.5 )
UpperCAmelCase_ : int = np.mat(-1 * np.random.rand(self.num_bpa ,self.num_bpa ) + 0.5 )
UpperCAmelCase_ : Dict = -2 * np.random.rand(self.conva[1] ) + 1
UpperCAmelCase_ : str = -2 * np.random.rand(self.num_bpa ) + 1
UpperCAmelCase_ : Union[str, Any] = -2 * np.random.rand(self.num_bpa ) + 1
def A__ ( self: str ,lowerCamelCase_: Optional[Any] ) -> Tuple:
# save model dict with pickle
UpperCAmelCase_ : Dict = {
"""num_bp1""": self.num_bpa,
"""num_bp2""": self.num_bpa,
"""num_bp3""": self.num_bpa,
"""conv1""": self.conva,
"""step_conv1""": self.step_conva,
"""size_pooling1""": self.size_poolinga,
"""rate_weight""": self.rate_weight,
"""rate_thre""": self.rate_thre,
"""w_conv1""": self.w_conva,
"""wkj""": self.wkj,
"""vji""": self.vji,
"""thre_conv1""": self.thre_conva,
"""thre_bp2""": self.thre_bpa,
"""thre_bp3""": self.thre_bpa,
}
with open(lowerCamelCase_ ,"""wb""" ) as f:
pickle.dump(lowerCamelCase_ ,lowerCamelCase_ )
print(F'''Model saved: {save_path}''' )
@classmethod
def A__ ( cls: List[str] ,lowerCamelCase_: str ) -> List[str]:
# read saved model
with open(lowerCamelCase_ ,"""rb""" ) as f:
UpperCAmelCase_ : Any = pickle.load(lowerCamelCase_ ) # noqa: S301
UpperCAmelCase_ : Union[str, Any] = model_dic.get("""conv1""" )
conv_get.append(model_dic.get("""step_conv1""" ) )
UpperCAmelCase_ : List[str] = model_dic.get("""size_pooling1""" )
UpperCAmelCase_ : Tuple = model_dic.get("""num_bp1""" )
UpperCAmelCase_ : Optional[Any] = model_dic.get("""num_bp2""" )
UpperCAmelCase_ : List[str] = model_dic.get("""num_bp3""" )
UpperCAmelCase_ : List[Any] = model_dic.get("""rate_weight""" )
UpperCAmelCase_ : Dict = model_dic.get("""rate_thre""" )
# create model instance
UpperCAmelCase_ : List[Any] = CNN(lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ )
# modify model parameter
UpperCAmelCase_ : Any = model_dic.get("""w_conv1""" )
UpperCAmelCase_ : int = model_dic.get("""wkj""" )
UpperCAmelCase_ : int = model_dic.get("""vji""" )
UpperCAmelCase_ : Optional[int] = model_dic.get("""thre_conv1""" )
UpperCAmelCase_ : List[str] = model_dic.get("""thre_bp2""" )
UpperCAmelCase_ : Dict = model_dic.get("""thre_bp3""" )
return conv_ins
def A__ ( self: List[Any] ,lowerCamelCase_: Union[str, Any] ) -> Tuple:
return 1 / (1 + np.exp(-1 * x ))
def A__ ( self: Union[str, Any] ,lowerCamelCase_: Union[str, Any] ) -> Optional[Any]:
return round(lowerCamelCase_ ,3 )
def A__ ( self: Tuple ,lowerCamelCase_: Any ,lowerCamelCase_: List[str] ,lowerCamelCase_: str ,lowerCamelCase_: Any ,lowerCamelCase_: Union[str, Any] ) -> Any:
# convolution process
UpperCAmelCase_ : Optional[Any] = convs[0]
UpperCAmelCase_ : int = convs[1]
UpperCAmelCase_ : int = np.shape(lowerCamelCase_ )[0]
# get the data slice of original image data, data_focus
UpperCAmelCase_ : Dict = []
for i_focus in range(0 ,size_data - size_conv + 1 ,lowerCamelCase_ ):
for j_focus in range(0 ,size_data - size_conv + 1 ,lowerCamelCase_ ):
UpperCAmelCase_ : Union[str, Any] = data[
i_focus : i_focus + size_conv, j_focus : j_focus + size_conv
]
data_focus.append(lowerCamelCase_ )
# calculate the feature map of every single kernel, and saved as list of matrix
UpperCAmelCase_ : Any = []
UpperCAmelCase_ : Tuple = int((size_data - size_conv) / conv_step + 1 )
for i_map in range(lowerCamelCase_ ):
UpperCAmelCase_ : Optional[int] = []
for i_focus in range(len(lowerCamelCase_ ) ):
UpperCAmelCase_ : int = (
np.sum(np.multiply(data_focus[i_focus] ,w_convs[i_map] ) )
- thre_convs[i_map]
)
featuremap.append(self.sig(lowerCamelCase_ ) )
UpperCAmelCase_ : Union[str, Any] = np.asmatrix(lowerCamelCase_ ).reshape(
lowerCamelCase_ ,lowerCamelCase_ )
data_featuremap.append(lowerCamelCase_ )
# expanding the data slice to One dimenssion
UpperCAmelCase_ : Optional[Any] = []
for each_focus in data_focus:
focusa_list.extend(self.Expand_Mat(lowerCamelCase_ ) )
UpperCAmelCase_ : Optional[int] = np.asarray(lowerCamelCase_ )
return focus_list, data_featuremap
def A__ ( self: Tuple ,lowerCamelCase_: Optional[int] ,lowerCamelCase_: Tuple ,lowerCamelCase_: Optional[Any]="average_pool" ) -> List[Any]:
# pooling process
UpperCAmelCase_ : Optional[Any] = len(featuremaps[0] )
UpperCAmelCase_ : Any = int(size_map / size_pooling )
UpperCAmelCase_ : Optional[int] = []
for i_map in range(len(lowerCamelCase_ ) ):
UpperCAmelCase_ : Any = featuremaps[i_map]
UpperCAmelCase_ : Tuple = []
for i_focus in range(0 ,lowerCamelCase_ ,lowerCamelCase_ ):
for j_focus in range(0 ,lowerCamelCase_ ,lowerCamelCase_ ):
UpperCAmelCase_ : str = feature_map[
i_focus : i_focus + size_pooling,
j_focus : j_focus + size_pooling,
]
if pooling_type == "average_pool":
# average pooling
map_pooled.append(np.average(lowerCamelCase_ ) )
elif pooling_type == "max_pooling":
# max pooling
map_pooled.append(np.max(lowerCamelCase_ ) )
UpperCAmelCase_ : int = np.asmatrix(lowerCamelCase_ ).reshape(lowerCamelCase_ ,lowerCamelCase_ )
featuremap_pooled.append(lowerCamelCase_ )
return featuremap_pooled
def A__ ( self: Union[str, Any] ,lowerCamelCase_: Tuple ) -> Optional[int]:
# expanding three dimension data to one dimension list
UpperCAmelCase_ : List[Any] = []
for i in range(len(lowerCamelCase_ ) ):
UpperCAmelCase_ : Tuple = np.shape(data[i] )
UpperCAmelCase_ : Optional[int] = data[i].reshape(1 ,shapes[0] * shapes[1] )
UpperCAmelCase_ : Optional[int] = data_listed.getA().tolist()[0]
data_expanded.extend(lowerCamelCase_ )
UpperCAmelCase_ : int = np.asarray(lowerCamelCase_ )
return data_expanded
def A__ ( self: Optional[Any] ,lowerCamelCase_: Optional[int] ) -> Union[str, Any]:
# expanding matrix to one dimension list
UpperCAmelCase_ : List[Any] = np.asarray(lowerCamelCase_ )
UpperCAmelCase_ : str = np.shape(lowerCamelCase_ )
UpperCAmelCase_ : Dict = data_mat.reshape(1 ,shapes[0] * shapes[1] )
return data_expanded
def A__ ( self: str ,lowerCamelCase_: Dict ,lowerCamelCase_: int ,lowerCamelCase_: Optional[Any] ,lowerCamelCase_: Union[str, Any] ,lowerCamelCase_: Any ) -> Union[str, Any]:
UpperCAmelCase_ : Any = []
UpperCAmelCase_ : Tuple = 0
for i_map in range(lowerCamelCase_ ):
UpperCAmelCase_ : Optional[Any] = np.ones((size_map, size_map) )
for i in range(0 ,lowerCamelCase_ ,lowerCamelCase_ ):
for j in range(0 ,lowerCamelCase_ ,lowerCamelCase_ ):
UpperCAmelCase_ : Any = pd_pool[
i_pool
]
UpperCAmelCase_ : List[str] = i_pool + 1
UpperCAmelCase_ : Optional[Any] = np.multiply(
lowerCamelCase_ ,np.multiply(out_map[i_map] ,(1 - out_map[i_map]) ) )
pd_all.append(lowerCamelCase_ )
return pd_all
def A__ ( self: str ,lowerCamelCase_: int ,lowerCamelCase_: int ,lowerCamelCase_: List[Any] ,lowerCamelCase_: Any ,lowerCamelCase_: List[str] ,lowerCamelCase_: Any=bool ) -> Optional[int]:
# model traning
print("""----------------------Start Training-------------------------""" )
print((""" - - Shape: Train_Data """, np.shape(lowerCamelCase_ )) )
print((""" - - Shape: Teach_Data """, np.shape(lowerCamelCase_ )) )
UpperCAmelCase_ : str = 0
UpperCAmelCase_ : Tuple = []
UpperCAmelCase_ : Any = 10000
while rp < n_repeat and mse >= error_accuracy:
UpperCAmelCase_ : List[str] = 0
print(F'''-------------Learning Time {rp}--------------''' )
for p in range(len(lowerCamelCase_ ) ):
# print('------------Learning Image: %d--------------'%p)
UpperCAmelCase_ : str = np.asmatrix(datas_train[p] )
UpperCAmelCase_ : Optional[Any] = np.asarray(datas_teach[p] )
UpperCAmelCase_ , UpperCAmelCase_ : Optional[int] = self.convolute(
lowerCamelCase_ ,self.conva ,self.w_conva ,self.thre_conva ,conv_step=self.step_conva ,)
UpperCAmelCase_ : List[Any] = self.pooling(lowerCamelCase_ ,self.size_poolinga )
UpperCAmelCase_ : int = np.shape(lowerCamelCase_ )
UpperCAmelCase_ : Dict = self._expand(lowerCamelCase_ )
UpperCAmelCase_ : Union[str, Any] = data_bp_input
UpperCAmelCase_ : Optional[Any] = np.dot(lowerCamelCase_ ,self.vji.T ) - self.thre_bpa
UpperCAmelCase_ : int = self.sig(lowerCamelCase_ )
UpperCAmelCase_ : Union[str, Any] = np.dot(lowerCamelCase_ ,self.wkj.T ) - self.thre_bpa
UpperCAmelCase_ : Optional[Any] = self.sig(lowerCamelCase_ )
# --------------Model Leaning ------------------------
# calculate error and gradient---------------
UpperCAmelCase_ : List[str] = np.multiply(
(data_teach - bp_outa) ,np.multiply(lowerCamelCase_ ,(1 - bp_outa) ) )
UpperCAmelCase_ : List[Any] = np.multiply(
np.dot(lowerCamelCase_ ,self.wkj ) ,np.multiply(lowerCamelCase_ ,(1 - bp_outa) ) )
UpperCAmelCase_ : Any = np.dot(lowerCamelCase_ ,self.vji )
UpperCAmelCase_ : Tuple = pd_i_all / (self.size_poolinga * self.size_poolinga)
UpperCAmelCase_ : List[str] = pd_conva_pooled.T.getA().tolist()
UpperCAmelCase_ : str = self._calculate_gradient_from_pool(
lowerCamelCase_ ,lowerCamelCase_ ,shape_featuremapa[0] ,shape_featuremapa[1] ,self.size_poolinga ,)
# weight and threshold learning process---------
# convolution layer
for k_conv in range(self.conva[1] ):
UpperCAmelCase_ : List[str] = self._expand_mat(pd_conva_all[k_conv] )
UpperCAmelCase_ : Optional[Any] = self.rate_weight * np.dot(lowerCamelCase_ ,lowerCamelCase_ )
UpperCAmelCase_ : int = self.w_conva[k_conv] + delta_w.reshape(
(self.conva[0], self.conva[0]) )
UpperCAmelCase_ : str = (
self.thre_conva[k_conv]
- np.sum(pd_conva_all[k_conv] ) * self.rate_thre
)
# all connected layer
UpperCAmelCase_ : int = self.wkj + pd_k_all.T * bp_outa * self.rate_weight
UpperCAmelCase_ : Tuple = self.vji + pd_j_all.T * bp_outa * self.rate_weight
UpperCAmelCase_ : int = self.thre_bpa - pd_k_all * self.rate_thre
UpperCAmelCase_ : str = self.thre_bpa - pd_j_all * self.rate_thre
# calculate the sum error of all single image
UpperCAmelCase_ : int = np.sum(abs(data_teach - bp_outa ) )
error_count += errors
# print(' ----Teach ',data_teach)
# print(' ----BP_output ',bp_out3)
UpperCAmelCase_ : int = rp + 1
UpperCAmelCase_ : Any = error_count / patterns
all_mse.append(lowerCamelCase_ )
def draw_error():
UpperCAmelCase_ : Any = [error_accuracy for i in range(int(n_repeat * 1.2 ) )]
plt.plot(lowerCamelCase_ ,"""+-""" )
plt.plot(lowerCamelCase_ ,"""r--""" )
plt.xlabel("""Learning Times""" )
plt.ylabel("""All_mse""" )
plt.grid(lowerCamelCase_ ,alpha=0.5 )
plt.show()
print("""------------------Training Complished---------------------""" )
print((""" - - Training epoch: """, rp, F''' - - Mse: {mse:.6f}''') )
if draw_e:
draw_error()
return mse
def A__ ( self: Optional[int] ,lowerCamelCase_: Any ) -> Tuple:
# model predict
UpperCAmelCase_ : Union[str, Any] = []
print("""-------------------Start Testing-------------------------""" )
print((""" - - Shape: Test_Data """, np.shape(lowerCamelCase_ )) )
for p in range(len(lowerCamelCase_ ) ):
UpperCAmelCase_ : int = np.asmatrix(datas_test[p] )
UpperCAmelCase_ , UpperCAmelCase_ : List[str] = self.convolute(
lowerCamelCase_ ,self.conva ,self.w_conva ,self.thre_conva ,conv_step=self.step_conva ,)
UpperCAmelCase_ : Optional[Any] = self.pooling(lowerCamelCase_ ,self.size_poolinga )
UpperCAmelCase_ : str = self._expand(lowerCamelCase_ )
UpperCAmelCase_ : str = data_bp_input
UpperCAmelCase_ : Union[str, Any] = bp_outa * self.vji.T - self.thre_bpa
UpperCAmelCase_ : Optional[int] = self.sig(lowerCamelCase_ )
UpperCAmelCase_ : Tuple = bp_outa * self.wkj.T - self.thre_bpa
UpperCAmelCase_ : List[Any] = self.sig(lowerCamelCase_ )
produce_out.extend(bp_outa.getA().tolist() )
UpperCAmelCase_ : int = [list(map(self.do_round ,lowerCamelCase_ ) ) for each in produce_out]
return np.asarray(lowerCamelCase_ )
def A__ ( self: Optional[Any] ,lowerCamelCase_: Dict ) -> Tuple:
# return the data of image after convoluting process so we can check it out
UpperCAmelCase_ : Optional[int] = np.asmatrix(lowerCamelCase_ )
UpperCAmelCase_ , UpperCAmelCase_ : Union[str, Any] = self.convolute(
lowerCamelCase_ ,self.conva ,self.w_conva ,self.thre_conva ,conv_step=self.step_conva ,)
UpperCAmelCase_ : Dict = self.pooling(lowerCamelCase_ ,self.size_poolinga )
return data_conveda, data_pooleda
if __name__ == "__main__":
pass
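# Hypothetical round-trip for the persistence methods above (the row's method
# names are mangled; only the class name CNN and the pickled dict keys are
# visible, so the save/load API names here are assumptions):
#   model.save_model("cnn.pkl")       # pickles hyper-parameters and weights
#   restored = CNN.ReadModel("cnn.pkl")  # classmethod that rebuilds a CNN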
| 345 | 0 |
class UpperCAmelCase__ :
"""simple docstring"""
def __init__( self , A_ ) -> None:
__UpperCamelCase =size
__UpperCamelCase =[0] * size
__UpperCamelCase =[0] * size
@staticmethod
def _a ( A_ ) -> int:
return index | (index + 1)
@staticmethod
def _a ( A_ ) -> int:
return (index & (index + 1)) - 1
def _a ( self , A_ , A_ ) -> None:
__UpperCamelCase =value
while index < self.size:
__UpperCamelCase =self.get_prev(A_ ) + 1
if current_left_border == index:
__UpperCamelCase =value
else:
__UpperCamelCase =max(A_ , A_ , A_ )
__UpperCamelCase =self.get_next(A_ )
def _a ( self , A_ , A_ ) -> int:
right -= 1 # Because of right is exclusive
__UpperCamelCase =0
while left <= right:
__UpperCamelCase =self.get_prev(A_ )
if left <= current_left:
__UpperCamelCase =max(A_ , self.tree[right] )
__UpperCamelCase =current_left
else:
__UpperCamelCase =max(A_ , self.arr[right] )
right -= 1
return result
if __name__ == "__main__":
import doctest
doctest.testmod()
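# Hedged usage sketch for the range-max structure above (its update/query
# method names are mangled in the row; semantics inferred from the bodies):
# construct it with a size, point-update index 0 to value 7, then a [0, 5)
# max query returns 7. Note the query method treats its `right` argument as
# exclusive -- it is decremented on entry before the descent.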
| 117 |
from ....utils import logging
_A = logging.get_logger(__name__)
class MMBTConfig( object ):
    """simple docstring"""
    def __init__( self , config , num_labels=None , modal_hidden_size=2048 ) -> Any:
        self.__dict__ = config.__dict__
        self.modal_hidden_size = modal_hidden_size
        if num_labels:
            self.num_labels = num_labels
| 117 | 1 |
import itertools
import random
import unittest
import numpy as np
from transformers import BatchFeature, SpeechTaFeatureExtractor
from transformers.testing_utils import require_torch
from transformers.utils.import_utils import is_torch_available
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
if is_torch_available():
import torch
lowerCamelCase : Tuple = random.Random()
def SCREAMING_SNAKE_CASE__ ( lowercase ,lowercase=1.0 ,lowercase=None ,lowercase=None ) -> List[Any]:
if rng is None:
snake_case : Tuple = global_rng
snake_case : Tuple = []
for batch_idx in range(shape[0] ):
values.append([] )
for _ in range(shape[1] ):
values[-1].append(rng.random() * scale )
return values
@require_torch
class __lowercase (unittest.TestCase ):
"""simple docstring"""
def __init__( self , A , A=7 , A=4_0_0 , A=2_0_0_0 , A=1 , A=0.0 , A=1_6_0_0_0 , A=True , A=8_0 , A=1_6 , A=6_4 , A="hann_window" , A=8_0 , A=7_6_0_0 , A=1e-1_0 , A=True , ) -> Optional[Any]:
snake_case : List[str] = parent
snake_case : Tuple = batch_size
snake_case : Union[str, Any] = min_seq_length
snake_case : str = max_seq_length
snake_case : Dict = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
snake_case : int = feature_size
snake_case : Any = padding_value
snake_case : Union[str, Any] = sampling_rate
snake_case : Union[str, Any] = do_normalize
snake_case : int = num_mel_bins
snake_case : Optional[int] = hop_length
snake_case : str = win_length
snake_case : Union[str, Any] = win_function
snake_case : int = fmin
snake_case : Optional[int] = fmax
snake_case : List[Any] = mel_floor
snake_case : str = return_attention_mask
def UpperCAmelCase ( self ) -> Optional[int]:
return {
"feature_size": self.feature_size,
"padding_value": self.padding_value,
"sampling_rate": self.sampling_rate,
"do_normalize": self.do_normalize,
"num_mel_bins": self.num_mel_bins,
"hop_length": self.hop_length,
"win_length": self.win_length,
"win_function": self.win_function,
"fmin": self.fmin,
"fmax": self.fmax,
"mel_floor": self.mel_floor,
"return_attention_mask": self.return_attention_mask,
}
def UpperCAmelCase ( self , A=False , A=False ) -> Dict:
def _flatten(A ):
return list(itertools.chain(*__UpperCamelCase ) )
if equal_length:
snake_case : Tuple = floats_list((self.batch_size, self.max_seq_length) )
else:
# make sure that inputs increase in size
snake_case : str = [
_flatten(floats_list((x, self.feature_size) ) )
for x in range(self.min_seq_length , self.max_seq_length , self.seq_length_diff )
]
if numpify:
snake_case : List[str] = [np.asarray(__UpperCamelCase ) for x in speech_inputs]
return speech_inputs
def UpperCAmelCase ( self , A=False , A=False ) -> int:
if equal_length:
snake_case : str = [floats_list((self.max_seq_length, self.num_mel_bins) ) for _ in range(self.batch_size )]
else:
# make sure that inputs increase in size
snake_case : Any = [
floats_list((x, self.num_mel_bins) )
for x in range(self.min_seq_length , self.max_seq_length , self.seq_length_diff )
]
if numpify:
snake_case : Dict = [np.asarray(__UpperCamelCase ) for x in speech_inputs]
return speech_inputs
@require_torch
class __lowercase (__SCREAMING_SNAKE_CASE , unittest.TestCase ):
"""simple docstring"""
_snake_case = SpeechTaFeatureExtractor
def UpperCAmelCase ( self ) -> int:
snake_case : Tuple = SpeechTaFeatureExtractionTester(self )
def UpperCAmelCase ( self , A ) -> str:
self.assertTrue(np.all(np.mean(__UpperCamelCase , axis=0 ) < 1e-3 ) )
self.assertTrue(np.all(np.abs(np.var(__UpperCamelCase , axis=0 ) - 1 ) < 1e-3 ) )
def UpperCAmelCase ( self ) -> Union[str, Any]:
snake_case : Dict = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
# create three inputs of length 800, 1000, and 1200
snake_case : int = [floats_list((1, x) )[0] for x in range(8_0_0 , 1_4_0_0 , 2_0_0 )]
snake_case : Any = [np.asarray(__UpperCamelCase ) for speech_input in speech_inputs]
# Test not batched input
snake_case : str = feat_extract(speech_inputs[0] , return_tensors="""np""" ).input_values
snake_case : List[Any] = feat_extract(np_speech_inputs[0] , return_tensors="""np""" ).input_values
self.assertTrue(np.allclose(__UpperCamelCase , __UpperCamelCase , atol=1e-3 ) )
# Test batched
snake_case : Optional[int] = feat_extract(__UpperCamelCase , return_tensors="""np""" ).input_values
snake_case : Union[str, Any] = feat_extract(__UpperCamelCase , return_tensors="""np""" ).input_values
for enc_seq_a, enc_seq_a in zip(__UpperCamelCase , __UpperCamelCase ):
self.assertTrue(np.allclose(__UpperCamelCase , __UpperCamelCase , atol=1e-3 ) )
def UpperCAmelCase ( self ) -> Tuple:
snake_case : Optional[int] = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
snake_case : Tuple = [floats_list((1, x) )[0] for x in range(8_0_0 , 1_4_0_0 , 2_0_0 )]
snake_case : Dict = ["""longest""", """max_length""", """do_not_pad"""]
snake_case : Any = [None, 1_6_0_0, None]
for max_length, padding in zip(__UpperCamelCase , __UpperCamelCase ):
snake_case : List[str] = feat_extract(__UpperCamelCase , padding=__UpperCamelCase , max_length=__UpperCamelCase , return_tensors="""np""" )
snake_case : Optional[Any] = processed.input_values
self._check_zero_mean_unit_variance(input_values[0][:8_0_0] )
self.assertTrue(input_values[0][8_0_0:].sum() < 1e-6 )
self._check_zero_mean_unit_variance(input_values[1][:1_0_0_0] )
self.assertTrue(input_values[0][1_0_0_0:].sum() < 1e-6 )
self._check_zero_mean_unit_variance(input_values[2][:1_2_0_0] )
def UpperCAmelCase ( self ) -> List[str]:
snake_case : Dict = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
snake_case : int = range(8_0_0 , 1_4_0_0 , 2_0_0 )
snake_case : Optional[Any] = [floats_list((1, x) )[0] for x in lengths]
snake_case : Tuple = ["""longest""", """max_length""", """do_not_pad"""]
snake_case : Optional[int] = [None, 1_6_0_0, None]
for max_length, padding in zip(__UpperCamelCase , __UpperCamelCase ):
snake_case : str = feat_extract(__UpperCamelCase , max_length=__UpperCamelCase , padding=__UpperCamelCase )
snake_case : List[str] = processed.input_values
self._check_zero_mean_unit_variance(input_values[0][:8_0_0] )
self._check_zero_mean_unit_variance(input_values[1][:1_0_0_0] )
self._check_zero_mean_unit_variance(input_values[2][:1_2_0_0] )
def UpperCAmelCase ( self ) -> List[str]:
snake_case : Optional[Any] = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
snake_case : List[Any] = [floats_list((1, x) )[0] for x in range(8_0_0 , 1_4_0_0 , 2_0_0 )]
snake_case : List[str] = feat_extract(
__UpperCamelCase , truncation=__UpperCamelCase , max_length=1_0_0_0 , padding="""max_length""" , return_tensors="""np""" )
snake_case : List[Any] = processed.input_values
self._check_zero_mean_unit_variance(input_values[0, :8_0_0] )
self._check_zero_mean_unit_variance(input_values[1] )
self._check_zero_mean_unit_variance(input_values[2] )
def UpperCAmelCase ( self ) -> Any:
snake_case : Optional[Any] = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
snake_case : Any = [floats_list((1, x) )[0] for x in range(8_0_0 , 1_4_0_0 , 2_0_0 )]
snake_case : int = feat_extract(
__UpperCamelCase , truncation=__UpperCamelCase , max_length=1_0_0_0 , padding="""longest""" , return_tensors="""np""" )
snake_case : List[Any] = processed.input_values
self._check_zero_mean_unit_variance(input_values[0, :8_0_0] )
self._check_zero_mean_unit_variance(input_values[1, :1_0_0_0] )
self._check_zero_mean_unit_variance(input_values[2] )
# make sure that if max_length < longest -> then pad to max_length
self.assertTrue(input_values.shape == (3, 1_0_0_0) )
snake_case : Tuple = [floats_list((1, x) )[0] for x in range(8_0_0 , 1_4_0_0 , 2_0_0 )]
snake_case : Tuple = feat_extract(
__UpperCamelCase , truncation=__UpperCamelCase , max_length=2_0_0_0 , padding="""longest""" , return_tensors="""np""" )
snake_case : Optional[Any] = processed.input_values
self._check_zero_mean_unit_variance(input_values[0, :8_0_0] )
self._check_zero_mean_unit_variance(input_values[1, :1_0_0_0] )
self._check_zero_mean_unit_variance(input_values[2] )
# make sure that if max_length > longest -> then pad to longest
self.assertTrue(input_values.shape == (3, 1_2_0_0) )
def UpperCAmelCase ( self ) -> str:
snake_case : List[Any] = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
snake_case : Any = np.random.rand(1_0_0 ).astype(np.floataa )
snake_case : Tuple = np_speech_inputs.tolist()
for inputs in [py_speech_inputs, np_speech_inputs]:
snake_case : List[str] = feature_extractor.pad([{"""input_values""": inputs}] , return_tensors="""np""" )
self.assertTrue(np_processed.input_values.dtype == np.floataa )
snake_case : str = feature_extractor.pad([{"""input_values""": inputs}] , return_tensors="""pt""" )
self.assertTrue(pt_processed.input_values.dtype == torch.floataa )
def UpperCAmelCase ( self ) -> Tuple:
snake_case : int = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
# create three inputs of length 800, 1000, and 1200
snake_case : int = [floats_list((1, x) )[0] for x in range(8_0_0 , 1_4_0_0 , 2_0_0 )]
snake_case : Tuple = [np.asarray(__UpperCamelCase ) for speech_input in speech_inputs]
# Test feature size
snake_case : int = feature_extractor(audio_target=__UpperCamelCase , padding=__UpperCamelCase , return_tensors="""np""" ).input_values
self.assertTrue(input_values.ndim == 3 )
self.assertTrue(input_values.shape[-1] == feature_extractor.num_mel_bins )
# Test not batched input
snake_case : Optional[Any] = feature_extractor(speech_inputs[0] , return_tensors="""np""" ).input_values
snake_case : Optional[Any] = feature_extractor(np_speech_inputs[0] , return_tensors="""np""" ).input_values
self.assertTrue(np.allclose(__UpperCamelCase , __UpperCamelCase , atol=1e-3 ) )
# Test batched
snake_case : int = feature_extractor(__UpperCamelCase , return_tensors="""np""" ).input_values
snake_case : Optional[Any] = feature_extractor(__UpperCamelCase , return_tensors="""np""" ).input_values
for enc_seq_a, enc_seq_a in zip(__UpperCamelCase , __UpperCamelCase ):
self.assertTrue(np.allclose(__UpperCamelCase , __UpperCamelCase , atol=1e-3 ) )
# Test 2-D numpy arrays are batched.
snake_case : List[Any] = [floats_list((1, x) )[0] for x in (8_0_0, 8_0_0, 8_0_0)]
snake_case : Optional[int] = np.asarray(__UpperCamelCase )
snake_case : List[str] = feature_extractor(__UpperCamelCase , return_tensors="""np""" ).input_values
snake_case : List[str] = feature_extractor(__UpperCamelCase , return_tensors="""np""" ).input_values
for enc_seq_a, enc_seq_a in zip(__UpperCamelCase , __UpperCamelCase ):
self.assertTrue(np.allclose(__UpperCamelCase , __UpperCamelCase , atol=1e-3 ) )
def UpperCAmelCase ( self ) -> Tuple:
snake_case : int = self.feat_extract_tester.prepare_inputs_for_target()
snake_case : Tuple = self.feature_extraction_class(**self.feat_extract_dict )
snake_case : Any = feat_extract.model_input_names[0]
snake_case : int = BatchFeature({input_name: speech_inputs} )
self.assertTrue(all(len(__UpperCamelCase ) == len(__UpperCamelCase ) for x, y in zip(__UpperCamelCase , processed_features[input_name] ) ) )
snake_case : Optional[Any] = self.feat_extract_tester.prepare_inputs_for_target(equal_length=__UpperCamelCase )
snake_case : Any = BatchFeature({input_name: speech_inputs} , tensor_type="""np""" )
snake_case : Union[str, Any] = processed_features[input_name]
if len(batch_features_input.shape ) < 3:
snake_case : List[str] = batch_features_input[:, :, None]
self.assertTrue(
batch_features_input.shape
== (self.feat_extract_tester.batch_size, len(speech_inputs[0] ), self.feat_extract_tester.num_mel_bins) )
@require_torch
def UpperCAmelCase ( self ) -> int:
snake_case : Union[str, Any] = self.feat_extract_tester.prepare_inputs_for_target(equal_length=__UpperCamelCase )
snake_case : List[Any] = self.feature_extraction_class(**self.feat_extract_dict )
snake_case : Optional[int] = feat_extract.model_input_names[0]
snake_case : Dict = BatchFeature({input_name: speech_inputs} , tensor_type="""pt""" )
snake_case : str = processed_features[input_name]
if len(batch_features_input.shape ) < 3:
snake_case : Any = batch_features_input[:, :, None]
self.assertTrue(
batch_features_input.shape
== (self.feat_extract_tester.batch_size, len(speech_inputs[0] ), self.feat_extract_tester.num_mel_bins) )
@require_torch
def UpperCAmelCase ( self ) -> int:
snake_case : List[str] = self.feature_extraction_class(**self.feat_extract_dict )
snake_case : int = self.feat_extract_tester.prepare_inputs_for_target()
snake_case : Dict = feat_extract.model_input_names[0]
snake_case : Tuple = BatchFeature({input_name: speech_inputs} )
snake_case : Tuple = feat_extract.num_mel_bins # hack!
snake_case : Optional[Any] = feat_extract.pad(__UpperCamelCase , padding="""longest""" , return_tensors="""np""" )[input_name]
snake_case : Union[str, Any] = feat_extract.pad(__UpperCamelCase , padding="""longest""" , return_tensors="""pt""" )[input_name]
self.assertTrue(abs(input_np.astype(np.floataa ).sum() - input_pt.numpy().astype(np.floataa ).sum() ) < 1e-2 )
def UpperCAmelCase ( self ) -> List[str]:
snake_case : str = self.feat_extract_dict
snake_case : Union[str, Any] = True
snake_case : Tuple = self.feature_extraction_class(**__UpperCamelCase )
snake_case : Optional[int] = self.feat_extract_tester.prepare_inputs_for_target()
snake_case : Any = [len(__UpperCamelCase ) for x in speech_inputs]
snake_case : Optional[int] = feat_extract.model_input_names[0]
snake_case : int = BatchFeature({input_name: speech_inputs} )
snake_case : Union[str, Any] = feat_extract.num_mel_bins # hack!
snake_case : Union[str, Any] = feat_extract.pad(__UpperCamelCase , padding="""longest""" , return_tensors="""np""" )
self.assertIn("""attention_mask""" , __UpperCamelCase )
self.assertListEqual(list(processed.attention_mask.shape ) , list(processed[input_name].shape[:2] ) )
self.assertListEqual(processed.attention_mask.sum(-1 ).tolist() , __UpperCamelCase )
def UpperCAmelCase ( self ) -> List[str]:
snake_case : List[str] = self.feat_extract_dict
snake_case : int = True
snake_case : str = self.feature_extraction_class(**__UpperCamelCase )
snake_case : Tuple = self.feat_extract_tester.prepare_inputs_for_target()
snake_case : Union[str, Any] = [len(__UpperCamelCase ) for x in speech_inputs]
snake_case : Union[str, Any] = feat_extract.model_input_names[0]
snake_case : Union[str, Any] = BatchFeature({input_name: speech_inputs} )
snake_case : int = min(__UpperCamelCase )
snake_case : int = feat_extract.num_mel_bins # hack!
snake_case : int = feat_extract.pad(
__UpperCamelCase , padding="""max_length""" , max_length=__UpperCamelCase , truncation=__UpperCamelCase , return_tensors="""np""" )
self.assertIn("""attention_mask""" , __UpperCamelCase )
self.assertListEqual(
list(processed_pad.attention_mask.shape ) , [processed_pad[input_name].shape[0], max_length] )
self.assertListEqual(
processed_pad.attention_mask[:, :max_length].sum(-1 ).tolist() , [max_length for x in speech_inputs] )
def UpperCAmelCase ( self , A ) -> str:
from datasets import load_dataset
snake_case : str = load_dataset("""hf-internal-testing/librispeech_asr_dummy""" , """clean""" , split="""validation""" )
# automatic decoding with librispeech
snake_case : Union[str, Any] = ds.sort("""id""" ).select(range(__UpperCamelCase ) )[:num_samples]["""audio"""]
return [x["array"] for x in speech_samples]
def UpperCAmelCase ( self ) -> Dict:
snake_case : Tuple = torch.tensor(
[2.3_8_0_4e-0_3, 2.0_7_5_2e-0_3, 1.9_8_3_6e-0_3, 2.1_0_5_7e-0_3, 1.6_1_7_4e-0_3,
3.0_5_1_8e-0_4, 9.1_5_5_3e-0_5, 3.3_5_6_9e-0_4, 9.7_6_5_6e-0_4, 1.8_3_1_1e-0_3,
2.0_1_4_2e-0_3, 2.1_0_5_7e-0_3, 1.7_3_9_5e-0_3, 4.5_7_7_6e-0_4, -3.9_6_7_3e-0_4,
4.5_7_7_6e-0_4, 1.0_0_7_1e-0_3, 9.1_5_5_3e-0_5, 4.8_8_2_8e-0_4, 1.1_5_9_7e-0_3,
7.3_2_4_2e-0_4, 9.4_6_0_4e-0_4, 1.8_0_0_5e-0_3, 1.8_3_1_1e-0_3, 8.8_5_0_1e-0_4,
4.2_7_2_5e-0_4, 4.8_8_2_8e-0_4, 7.3_2_4_2e-0_4, 1.0_9_8_6e-0_3, 2.1_0_5_7e-0_3] )
# fmt: on
snake_case : str = self._load_datasamples(1 )
snake_case : List[str] = SpeechTaFeatureExtractor()
snake_case : Union[str, Any] = feature_extractor(__UpperCamelCase , return_tensors="""pt""" ).input_values
self.assertEquals(input_values.shape , (1, 9_3_6_8_0) )
self.assertTrue(torch.allclose(input_values[0, :3_0] , __UpperCamelCase , atol=1e-6 ) )
def UpperCAmelCase ( self ) -> Union[str, Any]:
snake_case : int = torch.tensor(
[-2.68_70, -3.01_04, -3.13_56, -3.53_52, -3.00_44, -3.03_53, -3.47_19, -3.67_77,
-3.15_20, -2.94_35, -2.65_53, -2.87_95, -2.99_44, -2.59_21, -3.02_79, -3.03_86,
-3.08_64, -3.12_91, -3.23_53, -2.74_44, -2.68_31, -2.72_87, -3.17_61, -3.15_71,
-3.27_26, -3.05_82, -3.10_07, -3.45_33, -3.46_95, -3.09_98] )
# fmt: on
snake_case : Union[str, Any] = self._load_datasamples(1 )
snake_case : str = SpeechTaFeatureExtractor()
snake_case : Tuple = feature_extractor(audio_target=__UpperCamelCase , return_tensors="""pt""" ).input_values
self.assertEquals(input_values.shape , (1, 3_6_6, 8_0) )
self.assertTrue(torch.allclose(input_values[0, 0, :3_0] , __UpperCamelCase , atol=1e-4 ) )
| 124 |
import argparse
import json
from collections import OrderedDict
from functools import partial
from pathlib import Path
import timm
import torch
from huggingface_hub import hf_hub_download
from transformers import LevitConfig, LevitForImageClassificationWithTeacher, LevitImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
_A = logging.get_logger()
def convert_weight_and_push( hidden_sizes : int , name : str , config : LevitConfig , save_directory : Path , push_to_hub : bool = True ) -> Any:
    print(f'''Converting {name}...''' )
    with torch.no_grad():
        if hidden_sizes == 128:
            if name[-1] == "S":
                from_model = timm.create_model("""levit_128s""" , pretrained=True )
            else:
                from_model = timm.create_model("""levit_128""" , pretrained=True )
        if hidden_sizes == 192:
            from_model = timm.create_model("""levit_192""" , pretrained=True )
        if hidden_sizes == 256:
            from_model = timm.create_model("""levit_256""" , pretrained=True )
        if hidden_sizes == 384:
            from_model = timm.create_model("""levit_384""" , pretrained=True )
        from_model.eval()
        our_model = LevitForImageClassificationWithTeacher(config ).eval()
        huggingface_weights = OrderedDict()
        weights = from_model.state_dict()
        og_keys = list(from_model.state_dict().keys() )
        new_keys = list(our_model.state_dict().keys() )
        print(len(og_keys ) , len(new_keys ) )
        for i in range(len(og_keys ) ):
            huggingface_weights[new_keys[i]] = weights[og_keys[i]]
        our_model.load_state_dict(huggingface_weights )
        x = torch.randn((2, 3, 224, 224) )
        from_model_logits = from_model(x )
        our_model_logits = our_model(x ).logits
        assert torch.allclose(from_model_logits , our_model_logits ), "The model logits don't match the original one."
        checkpoint_name = name
        print(checkpoint_name )
        if push_to_hub:
            our_model.save_pretrained(save_directory / checkpoint_name )
            image_processor = LevitImageProcessor()
            image_processor.save_pretrained(save_directory / checkpoint_name )
            print(f'''Pushed {checkpoint_name}''' )
def convert_weights_and_push( save_directory : Path , model_name : str = None , push_to_hub : bool = True ):
    filename = """imagenet-1k-id2label.json"""
    num_labels = 1000
    expected_shape = (1, num_labels)
    repo_id = """huggingface/label-files"""
    num_labels = num_labels
    id2label = json.load(open(hf_hub_download(repo_id , filename , repo_type="""dataset""" ) , """r""" ) )
    id2label = {int(k ): v for k, v in id2label.items()}
    label2id = {v: k for k, v in id2label.items()}
    ImageNetPreTrainedConfig = partial(LevitConfig , num_labels=num_labels , id2label=id2label , label2id=label2id )
    names_to_hidden_sizes = {
        """levit-128S""": 128,
        """levit-128""": 128,
        """levit-192""": 192,
        """levit-256""": 256,
        """levit-384""": 384,
    }
    names_to_config = {
        """levit-128S""": ImageNetPreTrainedConfig(
            hidden_sizes=[128, 256, 384] , num_attention_heads=[4, 6, 8] , depths=[2, 3, 4] , key_dim=[16, 16, 16] , drop_path_rate=0 , ),
        """levit-128""": ImageNetPreTrainedConfig(
            hidden_sizes=[128, 256, 384] , num_attention_heads=[4, 8, 12] , depths=[4, 4, 4] , key_dim=[16, 16, 16] , drop_path_rate=0 , ),
        """levit-192""": ImageNetPreTrainedConfig(
            hidden_sizes=[192, 288, 384] , num_attention_heads=[3, 5, 6] , depths=[4, 4, 4] , key_dim=[32, 32, 32] , drop_path_rate=0 , ),
        """levit-256""": ImageNetPreTrainedConfig(
            hidden_sizes=[256, 384, 512] , num_attention_heads=[4, 6, 8] , depths=[4, 4, 4] , key_dim=[32, 32, 32] , drop_path_rate=0 , ),
        """levit-384""": ImageNetPreTrainedConfig(
            hidden_sizes=[384, 512, 768] , num_attention_heads=[6, 9, 12] , depths=[4, 4, 4] , key_dim=[32, 32, 32] , drop_path_rate=0.1 , ),
    }
    if model_name:
        convert_weight_and_push(
            names_to_hidden_sizes[model_name] , model_name , names_to_config[model_name] , save_directory , push_to_hub )
    else:
        for model_name, config in names_to_config.items():
            convert_weight_and_push(names_to_hidden_sizes[model_name] , model_name , config , save_directory , push_to_hub )
return config, expected_shape
if __name__ == "__main__":
_A = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--model_name''',
default=None,
type=str,
help='''The name of the model you wish to convert, it must be one of the supported Levit* architecture,''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''',
default='''levit-dump-folder/''',
type=Path,
required=False,
help='''Path to the output PyTorch model directory.''',
)
parser.add_argument('''--push_to_hub''', action='''store_true''', help='''Push model and image processor to the hub''')
parser.add_argument(
'''--no-push_to_hub''',
dest='''push_to_hub''',
action='''store_false''',
help='''Do not push model and image processor to the hub''',
)
_A = parser.parse_args()
_A = args.pytorch_dump_folder_path
pytorch_dump_folder_path.mkdir(exist_ok=True, parents=True)
convert_weights_and_push(pytorch_dump_folder_path, args.model_name, args.push_to_hub)
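# Example invocation (flags exactly as declared by the argparse block above;
# the script filename itself is an assumption):
#   python convert_levit_checkpoint.py --model_name levit-128S \
#       --pytorch_dump_folder_path levit-dump-folder/ --push_to_hub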
| 122 | 0 |
'''simple docstring'''
import shutil
import tempfile
import unittest
import numpy as np
from transformers.testing_utils import (
is_pt_tf_cross_test,
require_tf,
require_torch,
require_torchvision,
require_vision,
)
from transformers.utils import is_tf_available, is_torch_available, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import AutoProcessor, SamImageProcessor, SamProcessor
if is_torch_available():
import torch
if is_tf_available():
import tensorflow as tf
@require_vision
@require_torchvision
class __magic_name__ ( unittest.TestCase ):
def __lowercase ( self : Tuple ):
_a : str = tempfile.mkdtemp()
_a : Dict = SamImageProcessor()
_a : Tuple = SamProcessor(_UpperCAmelCase )
processor.save_pretrained(self.tmpdirname )
def __lowercase ( self : Optional[Any] ,**_UpperCAmelCase : Tuple ):
return AutoProcessor.from_pretrained(self.tmpdirname ,**_UpperCAmelCase ).image_processor
def __lowercase ( self : Dict ):
shutil.rmtree(self.tmpdirname )
def __lowercase ( self : str ):
_a : Optional[int] = [np.random.randint(255 ,size=(3, 30, 400) ,dtype=np.uinta )]
_a : Optional[int] = [Image.fromarray(np.moveaxis(_UpperCAmelCase ,0 ,-1 ) ) for x in image_inputs]
return image_inputs
def __lowercase ( self : int ):
_a : int = SamProcessor(image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
_a : Optional[Any] = self.get_image_processor(do_normalize=_UpperCAmelCase ,padding_value=1.0 )
_a : List[Any] = SamProcessor.from_pretrained(self.tmpdirname ,do_normalize=_UpperCAmelCase ,padding_value=1.0 )
self.assertEqual(processor.image_processor.to_json_string() ,image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor ,_UpperCAmelCase )
def __lowercase ( self : Union[str, Any] ):
_a : Optional[int] = self.get_image_processor()
_a : Any = SamProcessor(image_processor=_UpperCAmelCase )
_a : Optional[int] = self.prepare_image_inputs()
_a : List[Any] = image_processor(_UpperCAmelCase ,return_tensors='np' )
_a : List[Any] = processor(images=_UpperCAmelCase ,return_tensors='np' )
input_feat_extract.pop('original_sizes' ) # pop original_sizes as it is popped in the processor
input_feat_extract.pop('reshaped_input_sizes' ) # pop original_sizes as it is popped in the processor
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() ,input_processor[key].sum() ,delta=1E-2 )
@require_torch
def __lowercase ( self : Union[str, Any] ):
_a : List[str] = self.get_image_processor()
_a : Optional[Any] = SamProcessor(image_processor=_UpperCAmelCase )
_a : Dict = [torch.ones((1, 3, 5, 5) )]
_a : List[Any] = [[1764, 2646]]
_a : Union[str, Any] = [[683, 1024]]
_a : Optional[Any] = processor.post_process_masks(_UpperCAmelCase ,_UpperCAmelCase ,_UpperCAmelCase )
self.assertEqual(masks[0].shape ,(1, 3, 1764, 2646) )
_a : Optional[int] = processor.post_process_masks(
_UpperCAmelCase ,torch.tensor(_UpperCAmelCase ) ,torch.tensor(_UpperCAmelCase ) )
self.assertEqual(masks[0].shape ,(1, 3, 1764, 2646) )
# should also work with np
_a : Optional[int] = [np.ones((1, 3, 5, 5) )]
_a : Optional[Any] = processor.post_process_masks(_UpperCAmelCase ,np.array(_UpperCAmelCase ) ,np.array(_UpperCAmelCase ) )
self.assertEqual(masks[0].shape ,(1, 3, 1764, 2646) )
_a : Optional[int] = [[1, 0], [0, 1]]
with self.assertRaises(_UpperCAmelCase ):
_a : Optional[Any] = processor.post_process_masks(_UpperCAmelCase ,np.array(_UpperCAmelCase ) ,np.array(_UpperCAmelCase ) )
@require_vision
@require_tf
class __magic_name__ ( unittest.TestCase ):
def __lowercase ( self : str ):
_a : str = tempfile.mkdtemp()
_a : Union[str, Any] = SamImageProcessor()
_a : str = SamProcessor(_UpperCAmelCase )
processor.save_pretrained(self.tmpdirname )
def __lowercase ( self : Tuple ,**_UpperCAmelCase : Tuple ):
return AutoProcessor.from_pretrained(self.tmpdirname ,**_UpperCAmelCase ).image_processor
def __lowercase ( self : Tuple ):
shutil.rmtree(self.tmpdirname )
def __lowercase ( self : Tuple ):
_a : str = [np.random.randint(255 ,size=(3, 30, 400) ,dtype=np.uinta )]
_a : Optional[Any] = [Image.fromarray(np.moveaxis(_UpperCAmelCase ,0 ,-1 ) ) for x in image_inputs]
return image_inputs
def __lowercase ( self : Optional[Any] ):
_a : Any = SamProcessor(image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
_a : List[str] = self.get_image_processor(do_normalize=_UpperCAmelCase ,padding_value=1.0 )
_a : int = SamProcessor.from_pretrained(self.tmpdirname ,do_normalize=_UpperCAmelCase ,padding_value=1.0 )
self.assertEqual(processor.image_processor.to_json_string() ,image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor ,_UpperCAmelCase )
def __lowercase ( self : Optional[Any] ):
_a : Union[str, Any] = self.get_image_processor()
_a : Optional[int] = SamProcessor(image_processor=_UpperCAmelCase )
_a : Optional[Any] = self.prepare_image_inputs()
_a : Union[str, Any] = image_processor(_UpperCAmelCase ,return_tensors='np' )
_a : Union[str, Any] = processor(images=_UpperCAmelCase ,return_tensors='np' )
input_feat_extract.pop('original_sizes' ) # pop original_sizes as it is popped in the processor
input_feat_extract.pop('reshaped_input_sizes' ) # pop reshaped_input_sizes as it is popped in the processor
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() ,input_processor[key].sum() ,delta=1E-2 )
@require_tf
def __lowercase ( self : List[str] ):
_a : Tuple = self.get_image_processor()
_a : Tuple = SamProcessor(image_processor=_UpperCAmelCase )
_a : Union[str, Any] = [tf.ones((1, 3, 5, 5) )]
_a : List[str] = [[1764, 2646]]
_a : Any = [[683, 1024]]
_a : int = processor.post_process_masks(_UpperCAmelCase ,_UpperCAmelCase ,_UpperCAmelCase ,return_tensors='tf' )
self.assertEqual(masks[0].shape ,(1, 3, 1764, 2646) )
_a : Optional[int] = processor.post_process_masks(
_UpperCAmelCase ,tf.convert_to_tensor(_UpperCAmelCase ) ,tf.convert_to_tensor(_UpperCAmelCase ) ,return_tensors='tf' ,)
self.assertEqual(masks[0].shape ,(1, 3, 1764, 2646) )
# should also work with np
_a : Optional[int] = [np.ones((1, 3, 5, 5) )]
_a : int = processor.post_process_masks(
_UpperCAmelCase ,np.array(_UpperCAmelCase ) ,np.array(_UpperCAmelCase ) ,return_tensors='tf' )
self.assertEqual(masks[0].shape ,(1, 3, 1764, 2646) )
_a : List[str] = [[1, 0], [0, 1]]
with self.assertRaises(tf.errors.InvalidArgumentError ):
_a : Any = processor.post_process_masks(
_UpperCAmelCase ,np.array(_UpperCAmelCase ) ,np.array(_UpperCAmelCase ) ,return_tensors='tf' )
@require_vision
@require_torchvision
class __magic_name__ ( unittest.TestCase ):
def __lowercase ( self : Optional[int] ):
_a : Union[str, Any] = tempfile.mkdtemp()
_a : str = SamImageProcessor()
_a : str = SamProcessor(_UpperCAmelCase )
processor.save_pretrained(self.tmpdirname )
def __lowercase ( self : Optional[int] ,**_UpperCAmelCase : str ):
return AutoProcessor.from_pretrained(self.tmpdirname ,**_UpperCAmelCase ).image_processor
def __lowercase ( self : Dict ):
shutil.rmtree(self.tmpdirname )
def __lowercase ( self : Dict ):
        _a : Any = [np.random.randint(255 ,size=(3, 30, 400) ,dtype=np.uint8 )]
_a : List[str] = [Image.fromarray(np.moveaxis(_UpperCAmelCase ,0 ,-1 ) ) for x in image_inputs]
return image_inputs
@is_pt_tf_cross_test
def __lowercase ( self : Dict ):
_a : str = self.get_image_processor()
_a : Union[str, Any] = SamProcessor(image_processor=_UpperCAmelCase )
        _a : Union[str, Any] = np.random.randint(0 ,2 ,size=(1, 3, 5, 5) ).astype(np.float32 )
_a : Any = [tf.convert_to_tensor(_UpperCAmelCase )]
_a : Optional[int] = [torch.tensor(_UpperCAmelCase )]
_a : int = [[1764, 2646]]
_a : Any = [[683, 1024]]
_a : Tuple = processor.post_process_masks(
_UpperCAmelCase ,_UpperCAmelCase ,_UpperCAmelCase ,return_tensors='tf' )
_a : Optional[Any] = processor.post_process_masks(
_UpperCAmelCase ,_UpperCAmelCase ,_UpperCAmelCase ,return_tensors='pt' )
self.assertTrue(np.all(tf_masks[0].numpy() == pt_masks[0].numpy() ) )
@is_pt_tf_cross_test
def __lowercase ( self : Optional[Any] ):
_a : Optional[int] = self.get_image_processor()
_a : List[Any] = SamProcessor(image_processor=_UpperCAmelCase )
_a : Any = self.prepare_image_inputs()
_a : Optional[Any] = image_processor(_UpperCAmelCase ,return_tensors='pt' )['pixel_values'].numpy()
_a : int = processor(images=_UpperCAmelCase ,return_tensors='pt' )['pixel_values'].numpy()
_a : str = image_processor(_UpperCAmelCase ,return_tensors='tf' )['pixel_values'].numpy()
_a : List[str] = processor(images=_UpperCAmelCase ,return_tensors='tf' )['pixel_values'].numpy()
self.assertTrue(np.allclose(_UpperCAmelCase ,_UpperCAmelCase ) )
self.assertTrue(np.allclose(_UpperCAmelCase ,_UpperCAmelCase ) )
self.assertTrue(np.allclose(_UpperCAmelCase ,_UpperCAmelCase ) )
| 351 |
'''simple docstring'''
import enum
import warnings
from ..tokenization_utils import TruncationStrategy
from ..utils import add_end_docstrings, is_tf_available, is_torch_available, logging
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_tf_available():
import tensorflow as tf
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
__lowerCAmelCase = logging.get_logger(__name__)
class __magic_name__ ( enum.Enum ):
lowerCAmelCase : Union[str, Any] = 0
lowerCAmelCase : Dict = 1
@add_end_docstrings(_UpperCamelCase )
class __magic_name__ ( _UpperCamelCase ):
lowerCAmelCase : int = 'generated'
def __init__( self : List[Any] ,*_UpperCAmelCase : Any ,**_UpperCAmelCase : Optional[int] ):
super().__init__(*_UpperCAmelCase ,**_UpperCAmelCase )
self.check_model_type(
TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
if self.framework == 'tf'
else MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING )
def __lowercase ( self : Optional[int] ,_UpperCAmelCase : List[Any]=None ,_UpperCAmelCase : Tuple=None ,_UpperCAmelCase : List[Any]=None ,_UpperCAmelCase : Union[str, Any]=None ,_UpperCAmelCase : Dict=None ,_UpperCAmelCase : Optional[int]=None ,**_UpperCAmelCase : List[Any] ,):
_a : Tuple = {}
if truncation is not None:
_a : Union[str, Any] = truncation
_a : List[str] = generate_kwargs
_a : Optional[Any] = {}
if return_tensors is not None and return_type is None:
_a : Optional[int] = ReturnType.TENSORS if return_tensors else ReturnType.TEXT
if return_type is not None:
_a : List[Any] = return_type
if clean_up_tokenization_spaces is not None:
_a : Optional[int] = clean_up_tokenization_spaces
if stop_sequence is not None:
_a : List[str] = self.tokenizer.encode(_UpperCAmelCase ,add_special_tokens=_UpperCAmelCase )
if len(_UpperCAmelCase ) > 1:
warnings.warn(
'Stopping on a multiple token sequence is not yet supported on transformers. The first token of'
' the stop sequence will be used as the stop sequence string in the interim.' )
_a : List[Any] = stop_sequence_ids[0]
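            # Editorial note: the first id of the encoded stop sequence is forwarded to generate() as its
            # `eos_token_id`, which is why multi-token stop sequences only trigger the warning above rather
            # than stopping on the full sequence.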
return preprocess_params, forward_params, postprocess_params
def __lowercase ( self : Any ,_UpperCAmelCase : int ,_UpperCAmelCase : int ,_UpperCAmelCase : int ):
return True
def __lowercase ( self : str ,*_UpperCAmelCase : Any ,_UpperCAmelCase : Union[str, Any] ):
_a : Optional[Any] = self.model.config.prefix if self.model.config.prefix is not None else ''
if isinstance(args[0] ,_UpperCAmelCase ):
if self.tokenizer.pad_token_id is None:
raise ValueError('Please make sure that the tokenizer has a pad_token_id when using a batch input' )
_a : Optional[Any] = ([prefix + arg for arg in args[0]],)
_a : Any = True
elif isinstance(args[0] ,_UpperCAmelCase ):
_a : List[str] = (prefix + args[0],)
_a : str = False
else:
raise ValueError(
F""" `args[0]`: {args[0]} have the wrong format. The should be either of type `str` or type `list`""" )
_a : Optional[Any] = self.tokenizer(*_UpperCAmelCase ,padding=_UpperCAmelCase ,truncation=_UpperCAmelCase ,return_tensors=self.framework )
# This is produced by tokenizers but is an invalid generate kwargs
if "token_type_ids" in inputs:
del inputs["token_type_ids"]
return inputs
def __call__( self : Optional[int] ,*_UpperCAmelCase : List[Any] ,**_UpperCAmelCase : Any ):
_a : Tuple = super().__call__(*_UpperCAmelCase ,**_UpperCAmelCase )
if (
isinstance(args[0] ,_UpperCAmelCase )
and all(isinstance(_UpperCAmelCase ,_UpperCAmelCase ) for el in args[0] )
and all(len(_UpperCAmelCase ) == 1 for res in result )
):
return [res[0] for res in result]
return result
def __lowercase ( self : Tuple ,_UpperCAmelCase : Union[str, Any] ,_UpperCAmelCase : List[str]=TruncationStrategy.DO_NOT_TRUNCATE ,**_UpperCAmelCase : int ):
_a : Dict = self._parse_and_tokenize(_UpperCAmelCase ,truncation=_UpperCAmelCase ,**_UpperCAmelCase )
return inputs
def __lowercase ( self : Any ,_UpperCAmelCase : str ,**_UpperCAmelCase : Tuple ):
if self.framework == "pt":
_a , _a : int = model_inputs['input_ids'].shape
elif self.framework == "tf":
_a , _a : Dict = tf.shape(model_inputs['input_ids'] ).numpy()
_a : Optional[Any] = generate_kwargs.get('min_length' ,self.model.config.min_length )
_a : Optional[int] = generate_kwargs.get('max_length' ,self.model.config.max_length )
self.check_inputs(_UpperCAmelCase ,generate_kwargs['min_length'] ,generate_kwargs['max_length'] )
_a : List[str] = self.model.generate(**_UpperCAmelCase ,**_UpperCAmelCase )
_a : int = output_ids.shape[0]
if self.framework == "pt":
_a : int = output_ids.reshape(_UpperCAmelCase ,out_b // in_b ,*output_ids.shape[1:] )
elif self.framework == "tf":
_a : Any = tf.reshape(_UpperCAmelCase ,(in_b, out_b // in_b, *output_ids.shape[1:]) )
return {"output_ids": output_ids}
def __lowercase ( self : Dict ,_UpperCAmelCase : Tuple ,_UpperCAmelCase : Any=ReturnType.TEXT ,_UpperCAmelCase : Dict=False ):
_a : Union[str, Any] = []
for output_ids in model_outputs["output_ids"][0]:
if return_type == ReturnType.TENSORS:
_a : int = {F"""{self.return_name}_token_ids""": output_ids}
elif return_type == ReturnType.TEXT:
_a : str = {
F"""{self.return_name}_text""": self.tokenizer.decode(
_UpperCAmelCase ,skip_special_tokens=_UpperCAmelCase ,clean_up_tokenization_spaces=_UpperCAmelCase ,)
}
records.append(_UpperCAmelCase )
return records
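# Minimal usage sketch for the pipeline above (editorial addition; assumes the "t5-small" checkpoint can be
# downloaded -- any seq2seq LM works, and the output text is illustrative):
#   from transformers import pipeline
#   generator = pipeline('text2text-generation' ,model='t5-small' )
#   generator('translate English to German: How old are you?' )
#   # -> [{'generated_text': 'Wie alt sind Sie?'}]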
@add_end_docstrings(_UpperCamelCase )
class __magic_name__ ( _UpperCamelCase ):
lowerCAmelCase : Any = 'summary'
def __call__( self : Any ,*_UpperCAmelCase : Union[str, Any] ,**_UpperCAmelCase : List[str] ):
return super().__call__(*_UpperCAmelCase ,**_UpperCAmelCase )
def __lowercase ( self : Optional[Any] ,_UpperCAmelCase : int ,_UpperCAmelCase : int ,_UpperCAmelCase : int ):
if max_length < min_length:
logger.warning(F"""Your min_length={min_length} must be inferior than your max_length={max_length}.""" )
if input_length < max_length:
logger.warning(
F"""Your max_length is set to {max_length}, but your input_length is only {input_length}. Since this is """
'a summarization task, where outputs shorter than the input are typically wanted, you might '
F"""consider decreasing max_length manually, e.g. summarizer('...', max_length={input_length//2})""" )
@add_end_docstrings(_UpperCamelCase )
class __magic_name__ ( _UpperCamelCase ):
lowerCAmelCase : List[Any] = 'translation'
def __lowercase ( self : Optional[int] ,_UpperCAmelCase : int ,_UpperCAmelCase : int ,_UpperCAmelCase : int ):
if input_length > 0.9 * max_length:
logger.warning(
F"""Your input_length: {input_length} is bigger than 0.9 * max_length: {max_length}. You might consider """
'increasing your max_length manually, e.g. translator(\'...\', max_length=400)' )
return True
def __lowercase ( self : Any ,*_UpperCAmelCase : Any ,_UpperCAmelCase : List[str]=TruncationStrategy.DO_NOT_TRUNCATE ,_UpperCAmelCase : Optional[int]=None ,_UpperCAmelCase : Any=None ):
if getattr(self.tokenizer ,'_build_translation_inputs' ,_UpperCAmelCase ):
return self.tokenizer._build_translation_inputs(
*_UpperCAmelCase ,return_tensors=self.framework ,truncation=_UpperCAmelCase ,src_lang=_UpperCAmelCase ,tgt_lang=_UpperCAmelCase )
else:
return super()._parse_and_tokenize(*_UpperCAmelCase ,truncation=_UpperCAmelCase )
def __lowercase ( self : Tuple ,_UpperCAmelCase : List[Any]=None ,_UpperCAmelCase : List[str]=None ,**_UpperCAmelCase : Dict ):
_a , _a , _a : str = super()._sanitize_parameters(**_UpperCAmelCase )
if src_lang is not None:
_a : Optional[int] = src_lang
if tgt_lang is not None:
_a : List[Any] = tgt_lang
if src_lang is None and tgt_lang is None:
# Backward compatibility, direct arguments use is preferred.
_a : int = kwargs.get('task' ,self.task )
_a : int = task.split('_' )
if task and len(_UpperCAmelCase ) == 4:
# translation, XX, to YY
_a : List[Any] = items[1]
_a : Any = items[3]
return preprocess_params, forward_params, postprocess_params
def __call__( self : Optional[Any] ,*_UpperCAmelCase : Tuple ,**_UpperCAmelCase : Optional[int] ):
return super().__call__(*_UpperCAmelCase ,**_UpperCAmelCase )
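# Minimal usage sketch for the translation pipeline (editorial addition): when `src_lang`/`tgt_lang` are not
# passed, `_sanitize_parameters` above recovers them from a task name of the form "translation_XX_to_YY":
#   from transformers import pipeline
#   translator = pipeline('translation_en_to_de' ,model='t5-small' )
#   translator('How old are you?' )  # -> [{'translation_text': 'Wie alt sind Sie?'}]  (illustrative output)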
| 107 | 0 |
import io
import itertools
import json
from dataclasses import dataclass
from typing import Optional
import pyarrow as pa
import pyarrow.json as paj
import datasets
from datasets.table import table_cast
from datasets.utils.file_utils import readline
A__: str = datasets.utils.logging.get_logger(__name__)
@dataclass
class _a ( datasets.BuilderConfig):
"""simple docstring"""
UpperCamelCase__ = None
UpperCamelCase__ = """utf-8"""
UpperCamelCase__ = None
UpperCamelCase__ = None
UpperCamelCase__ = True # deprecated
UpperCamelCase__ = None # deprecated
UpperCamelCase__ = 10 << 20 # 10MB
UpperCamelCase__ = None
class _a ( datasets.ArrowBasedBuilder):
"""simple docstring"""
UpperCamelCase__ = JsonConfig
def UpperCAmelCase_ ( self: Optional[int] ):
'''simple docstring'''
if self.config.block_size is not None:
logger.warning("The JSON loader parameter `block_size` is deprecated. Please use `chunksize` instead" )
UpperCamelCase__: int = self.config.block_size
if self.config.use_threads is not True:
logger.warning(
"The JSON loader parameter `use_threads` is deprecated and doesn\'t have any effect anymore." )
if self.config.newlines_in_values is not None:
raise ValueError("The JSON loader parameter `newlines_in_values` is no longer supported" )
return datasets.DatasetInfo(features=self.config.features )
def UpperCAmelCase_ ( self: Tuple , __lowerCamelCase: Union[str, Any] ):
'''simple docstring'''
if not self.config.data_files:
raise ValueError(F"At least one data file must be specified, but got data_files={self.config.data_files}" )
UpperCamelCase__: Dict = dl_manager.download_and_extract(self.config.data_files )
if isinstance(__lowerCamelCase , (str, list, tuple) ):
UpperCamelCase__: Union[str, Any] = data_files
if isinstance(__lowerCamelCase , __lowerCamelCase ):
UpperCamelCase__: Any = [files]
UpperCamelCase__: Dict = [dl_manager.iter_files(__lowerCamelCase ) for file in files]
return [datasets.SplitGenerator(name=datasets.Split.TRAIN , gen_kwargs={"files": files} )]
UpperCamelCase__: Tuple = []
for split_name, files in data_files.items():
if isinstance(__lowerCamelCase , __lowerCamelCase ):
UpperCamelCase__: Tuple = [files]
UpperCamelCase__: int = [dl_manager.iter_files(__lowerCamelCase ) for file in files]
splits.append(datasets.SplitGenerator(name=__lowerCamelCase , gen_kwargs={"files": files} ) )
return splits
def UpperCAmelCase_ ( self: Tuple , __lowerCamelCase: pa.Table ):
'''simple docstring'''
if self.config.features is not None:
# adding missing columns
for column_name in set(self.config.features ) - set(pa_table.column_names ):
UpperCamelCase__: Union[str, Any] = self.config.features.arrow_schema.field(__lowerCamelCase ).type
UpperCamelCase__: Any = pa_table.append_column(__lowerCamelCase , pa.array([None] * len(__lowerCamelCase ) , type=__lowerCamelCase ) )
# more expensive cast to support nested structures with keys in a different order
# allows str <-> int/float or str to Audio for example
UpperCamelCase__: Optional[int] = table_cast(__lowerCamelCase , self.config.features.arrow_schema )
return pa_table
def UpperCAmelCase_ ( self: Dict , __lowerCamelCase: Dict ):
'''simple docstring'''
for file_idx, file in enumerate(itertools.chain.from_iterable(__lowerCamelCase ) ):
# If the file is one json object and if we need to look at the list of items in one specific field
if self.config.field is not None:
with open(__lowerCamelCase , encoding=self.config.encoding , errors=self.config.encoding_errors ) as f:
UpperCamelCase__: Optional[Any] = json.load(__lowerCamelCase )
# We keep only the field we are interested in
UpperCamelCase__: Dict = dataset[self.config.field]
                # We accept two formats: a list of dicts or a dict of lists
if isinstance(__lowerCamelCase , (list, tuple) ):
UpperCamelCase__: Union[str, Any] = set().union(*[row.keys() for row in dataset] )
UpperCamelCase__: Union[str, Any] = {col: [row.get(__lowerCamelCase ) for row in dataset] for col in keys}
else:
UpperCamelCase__: Optional[Any] = dataset
UpperCamelCase__: Optional[Any] = pa.Table.from_pydict(__lowerCamelCase )
yield file_idx, self._cast_table(__lowerCamelCase )
# If the file has one json object per line
else:
with open(__lowerCamelCase , "rb" ) as f:
UpperCamelCase__: Optional[Any] = 0
# Use block_size equal to the chunk size divided by 32 to leverage multithreading
# Set a default minimum value of 16kB if the chunk size is really small
UpperCamelCase__: Dict = max(self.config.chunksize // 32 , 16 << 10 )
UpperCamelCase__: Dict = (
self.config.encoding_errors if self.config.encoding_errors is not None else 'strict'
)
while True:
UpperCamelCase__: Dict = f.read(self.config.chunksize )
if not batch:
break
# Finish current line
try:
batch += f.readline()
except (AttributeError, io.UnsupportedOperation):
batch += readline(__lowerCamelCase )
# PyArrow only accepts utf-8 encoded bytes
if self.config.encoding != "utf-8":
UpperCamelCase__: Tuple = batch.decode(self.config.encoding , errors=__lowerCamelCase ).encode("utf-8" )
try:
while True:
try:
UpperCamelCase__: int = paj.read_json(
io.BytesIO(__lowerCamelCase ) , read_options=paj.ReadOptions(block_size=__lowerCamelCase ) )
break
except (pa.ArrowInvalid, pa.ArrowNotImplementedError) as e:
if (
isinstance(__lowerCamelCase , pa.ArrowInvalid )
and "straddling" not in str(__lowerCamelCase )
or block_size > len(__lowerCamelCase )
):
raise
else:
# Increase the block size in case it was too small.
# The block size will be reset for the next file.
logger.debug(
F"Batch of {len(__lowerCamelCase )} bytes couldn\'t be parsed with block_size={block_size}. Retrying with block_size={block_size * 2}." )
block_size *= 2
except pa.ArrowInvalid as e:
try:
with open(
__lowerCamelCase , encoding=self.config.encoding , errors=self.config.encoding_errors ) as f:
UpperCamelCase__: Optional[int] = json.load(__lowerCamelCase )
except json.JSONDecodeError:
logger.error(F"Failed to read file \'{file}\' with error {type(__lowerCamelCase )}: {e}" )
raise e
# If possible, parse the file as a list of json objects and exit the loop
if isinstance(__lowerCamelCase , __lowerCamelCase ): # list is the only sequence type supported in JSON
try:
UpperCamelCase__: List[str] = set().union(*[row.keys() for row in dataset] )
UpperCamelCase__: str = {col: [row.get(__lowerCamelCase ) for row in dataset] for col in keys}
UpperCamelCase__: Optional[Any] = pa.Table.from_pydict(__lowerCamelCase )
except (pa.ArrowInvalid, AttributeError) as e:
logger.error(F"Failed to read file \'{file}\' with error {type(__lowerCamelCase )}: {e}" )
raise ValueError(F"Not able to read records in the JSON file at {file}." ) from None
yield file_idx, self._cast_table(__lowerCamelCase )
break
else:
logger.error(F"Failed to read file \'{file}\' with error {type(__lowerCamelCase )}: {e}" )
raise ValueError(
F"Not able to read records in the JSON file at {file}. "
F"You should probably indicate the field of the JSON file containing your records. "
F"This JSON file contain the following fields: {str(list(dataset.keys() ) )}. "
F"Select the correct one and provide it as `field=\'XXX\'` to the dataset loading method. " ) from None
# Uncomment for debugging (will print the Arrow table size and elements)
# logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}")
# logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows)))
yield (file_idx, batch_idx), self._cast_table(__lowerCamelCase )
batch_idx += 1
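# Minimal usage sketch (editorial addition): this builder is what backs `load_dataset("json", ...)`.
# The file names and the "rows" field below are illustrative placeholders, not shipped fixtures:
#   from datasets import load_dataset
#   ds = load_dataset('json' ,data_files='data.jsonl' )                # one JSON object per line
#   ds = load_dataset('json' ,data_files='data.json' ,field='rows' )   # list of records under a top-level key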
| 149 |
"""simple docstring"""
import collections
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
_UpperCamelCase: Tuple = logging.get_logger(__name__)
_UpperCamelCase: Optional[int] = '▁'
_UpperCamelCase: List[Any] = {'vocab_file': 'prophetnet.tokenizer'}
_UpperCamelCase: str = {
'vocab_file': {
'microsoft/xprophetnet-large-wiki100-cased': (
'https://huggingface.co/microsoft/xprophetnet-large-wiki100-cased/resolve/main/prophetnet.tokenizer'
),
}
}
_UpperCamelCase: Any = {
'microsoft/xprophetnet-large-wiki100-cased': {'do_lower_case': False},
}
_UpperCamelCase: Dict = {
'microsoft/xprophetnet-large-wiki100-cased': 5_1_2,
}
def lowercase__ ( _UpperCAmelCase ) -> Union[str, Any]:
'''simple docstring'''
lowercase : Union[str, Any] = collections.OrderedDict()
with open(_UpperCAmelCase , 'r' , encoding='utf-8' ) as reader:
lowercase : Tuple = reader.readlines()
for index, token in enumerate(_UpperCAmelCase ):
lowercase : Dict = token.rstrip('\n' )
lowercase : Optional[Any] = index
return vocab
class a__ ( SCREAMING_SNAKE_CASE__ ):
_lowerCamelCase = VOCAB_FILES_NAMES
_lowerCamelCase = PRETRAINED_VOCAB_FILES_MAP
_lowerCamelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_lowerCamelCase = ['input_ids', 'attention_mask']
def __init__( self : str, lowerCAmelCase : Union[str, Any], lowerCAmelCase : Any="[SEP]", lowerCAmelCase : int="[SEP]", lowerCAmelCase : Tuple="[SEP]", lowerCAmelCase : Union[str, Any]="[UNK]", lowerCAmelCase : Optional[Any]="[PAD]", lowerCAmelCase : int="[CLS]", lowerCAmelCase : Union[str, Any]="[MASK]", lowerCAmelCase : Optional[Dict[str, Any]] = None, **lowerCAmelCase : str, ) -> None:
lowercase : Optional[int] = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
bos_token=lowerCAmelCase, eos_token=lowerCAmelCase, sep_token=lowerCAmelCase, unk_token=lowerCAmelCase, pad_token=lowerCAmelCase, cls_token=lowerCAmelCase, mask_token=lowerCAmelCase, sp_model_kwargs=self.sp_model_kwargs, **lowerCAmelCase, )
try:
import sentencepiece as spm
except ImportError:
logger.warning(
                'You need to install SentencePiece to use XLMProphetNetTokenizer: https://github.com/google/sentencepiece'
' pip install sentencepiece' )
raise
lowercase : Optional[int] = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(str(lowerCAmelCase ) )
lowercase : int = vocab_file
# Original fairseq vocab and spm vocab must be "aligned":
# Vocab | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9
# -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ----
# fairseq | '<s>' | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's' | '▁de' | '-'
# spm | '<unk>' | '<s>' | '</s>' | ',' | '.' | '▁' | 's' | '▁de' | '-' | '▁a'
# put special tokens and [unused] tokens into the vocab
lowercase : Tuple = {'[PAD]': 0, '[CLS]': 1, '[SEP]': 2, '[UNK]': 3, '[MASK]': 4}
for i in range(10 ):
lowercase : List[str] = f'''[unused{i}]'''
lowercase : Any = 5 + i
# The first "real" token "," has position 15 in the embedding vocab and position 3 in the spm vocab
lowercase : Any = 12
lowercase : List[Any] = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
for k in self.fairseq_tokens_to_ids.keys():
self.unique_no_split_tokens.append(lowerCAmelCase )
def __getstate__( self : int ) -> str:
lowercase : int = self.__dict__.copy()
lowercase : Union[str, Any] = None
return state
def __setstate__( self : Tuple, lowerCAmelCase : Optional[Any] ) -> int:
lowercase : str = d
try:
import sentencepiece as spm
except ImportError:
logger.warning(
                'You need to install SentencePiece to use XLMProphetNetTokenizer: https://github.com/google/sentencepiece'
' pip install sentencepiece' )
raise
# for backward compatibility
if not hasattr(self, 'sp_model_kwargs' ):
lowercase : Any = {}
lowercase : Dict = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
def lowercase ( self : str, lowerCAmelCase : List[int], lowerCAmelCase : Optional[List[int]] = None, lowerCAmelCase : bool = False ) -> List[int]:
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=lowerCAmelCase, token_ids_a=lowerCAmelCase, already_has_special_tokens=lowerCAmelCase )
if token_ids_a is None:
return ([0] * len(lowerCAmelCase )) + [1]
return ([0] * len(lowerCAmelCase )) + [1] + ([0] * len(lowerCAmelCase )) + [1]
def lowercase ( self : int, lowerCAmelCase : List[int], lowerCAmelCase : Optional[List[int]] = None ) -> List[int]:
lowercase : List[str] = [self.sep_token_id]
if token_ids_a is None:
return len(token_ids_a + sep ) * [0]
return len(token_ids_a + sep + sep + token_ids_a + sep ) * [0]
@property
def lowercase ( self : List[str] ) -> int:
return len(self.sp_model ) + self.fairseq_offset
def lowercase ( self : Union[str, Any] ) -> str:
lowercase : str = {self.convert_ids_to_tokens(lowerCAmelCase ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def lowercase ( self : Dict, lowerCAmelCase : str ) -> str:
return self.sp_model.encode(lowerCAmelCase, out_type=lowerCAmelCase )
def lowercase ( self : List[str], lowerCAmelCase : Any ) -> List[Any]:
if token in self.fairseq_tokens_to_ids:
return self.fairseq_tokens_to_ids[token]
lowercase : Dict = self.sp_model.PieceToId(lowerCAmelCase )
# Need to return unknown token if the SP model returned 0
return spm_id + self.fairseq_offset if spm_id else self.unk_token_id
def lowercase ( self : Dict, lowerCAmelCase : Optional[Any] ) -> Dict:
if index in self.fairseq_ids_to_tokens:
return self.fairseq_ids_to_tokens[index]
return self.sp_model.IdToPiece(index - self.fairseq_offset )
def lowercase ( self : Tuple, lowerCAmelCase : List[str] ) -> Optional[Any]:
lowercase : Tuple = ''.join(lowerCAmelCase ).replace(lowerCAmelCase, ' ' ).strip()
return out_string
def lowercase ( self : List[Any], lowerCAmelCase : str, lowerCAmelCase : Optional[str] = None ) -> Tuple[str]:
if not os.path.isdir(lowerCAmelCase ):
logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' )
return
lowercase : List[Any] = os.path.join(
lowerCAmelCase, (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(lowerCAmelCase ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file, lowerCAmelCase )
elif not os.path.isfile(self.vocab_file ):
with open(lowerCAmelCase, 'wb' ) as fi:
lowercase : List[str] = self.sp_model.serialized_model_proto()
fi.write(lowerCAmelCase )
return (out_vocab_file,)
def lowercase ( self : List[Any], lowerCAmelCase : List[int], lowerCAmelCase : Optional[List[int]] = None ) -> List[int]:
if token_ids_a is None:
return token_ids_a + [self.sep_token_id]
lowercase : int = [self.sep_token_id]
return token_ids_a + sep + token_ids_a + sep
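# Minimal usage sketch (editorial addition; the class above is published as XLMProphetNetTokenizer and the
# checkpoint name comes from the pretrained maps defined at the top of this file):
#   tokenizer = XLMProphetNetTokenizer.from_pretrained('microsoft/xprophetnet-large-wiki100-cased')
#   tokenizer('Hello world')['input_ids']  # SentencePiece ids shifted by fairseq_offset, ending with [SEP]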
| 255 | 0 |
from __future__ import annotations
def a_ ( __lowercase : list[int] , __lowercase : int ) -> list[list[int]]:
_snake_case = []
_snake_case = []
_snake_case = 0
_snake_case = sum(__lowercase )
create_state_space_tree(__lowercase , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase )
return result
def a_ ( __lowercase : list[int] , __lowercase : int , __lowercase : int , __lowercase : list[int] , __lowercase : list[list[int]] , __lowercase : int , ) -> None:
if sum(__lowercase ) > max_sum or (remaining_nums_sum + sum(__lowercase )) < max_sum:
return
if sum(__lowercase ) == max_sum:
result.append(__lowercase )
return
for index in range(__lowercase , len(__lowercase ) ):
create_state_space_tree(
__lowercase , __lowercase , index + 1 , [*path, nums[index]] , __lowercase , remaining_nums_sum - nums[index] , )
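# Worked example (editorial addition): for nums = [3, 34, 4, 12, 5, 2] and max_sum = 9, the recursion prunes any
# branch whose running sum already exceeds 9 or whose remaining numbers can no longer reach 9, leaving exactly
# the subsets [3, 4, 2] and [4, 5] -- which is what the driver code below prints.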
_lowerCamelCase : List[str] = [3, 34, 4, 12, 5, 2]
_lowerCamelCase : List[Any] = 9
_lowerCamelCase : Tuple = generate_sum_of_subsets_soln(nums, max_sum)
print(*result) | 130 |
from functools import lru_cache
@lru_cache
def a_ ( __lowercase : int ) -> int:
if num < 0:
raise ValueError('Number should not be negative.' )
return 1 if num in (0, 1) else num * factorial(num - 1 )
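# Example (editorial addition): factorial(5) evaluates 5 * 4 * 3 * 2 * 1 = 120, and because of @lru_cache every
# intermediate result is memoized, so a later factorial(6) reuses the cached factorial(5) instead of recursing.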
if __name__ == "__main__":
import doctest
doctest.testmod() | 130 | 1 |
import unittest
from transformers import (
MODEL_FOR_CAUSAL_LM_MAPPING,
TF_MODEL_FOR_CAUSAL_LM_MAPPING,
TextGenerationPipeline,
logging,
pipeline,
)
from transformers.testing_utils import (
CaptureLogger,
is_pipeline_test,
require_accelerate,
require_tf,
require_torch,
require_torch_gpu,
require_torch_or_tf,
)
from .test_pipelines_common import ANY
@is_pipeline_test
@require_torch_or_tf
class _A ( unittest.TestCase):
SCREAMING_SNAKE_CASE : List[str] = MODEL_FOR_CAUSAL_LM_MAPPING
SCREAMING_SNAKE_CASE : Optional[Any] = TF_MODEL_FOR_CAUSAL_LM_MAPPING
@require_torch
def UpperCAmelCase ( self ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Optional[Any] = pipeline(task='text-generation' , model='sshleifer/tiny-ctrl' , framework='pt' )
# Using `do_sample=False` to force deterministic output
SCREAMING_SNAKE_CASE_ : str = text_generator('This is a test' , do_sample=_SCREAMING_SNAKE_CASE )
self.assertEqual(
_SCREAMING_SNAKE_CASE , [
{
'generated_text': (
'This is a test ☃ ☃ segmental segmental segmental 议议eski eski flutter flutter Lacy oscope.'
' oscope. FiliFili@@'
)
}
] , )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = text_generator(['This is a test', 'This is a second test'] )
self.assertEqual(
_SCREAMING_SNAKE_CASE , [
[
{
'generated_text': (
'This is a test ☃ ☃ segmental segmental segmental 议议eski eski flutter flutter Lacy oscope.'
' oscope. FiliFili@@'
)
}
],
[
{
'generated_text': (
'This is a second test ☃ segmental segmental segmental 议议eski eski flutter flutter Lacy'
' oscope. oscope. FiliFili@@'
)
}
],
] , )
SCREAMING_SNAKE_CASE_ : Dict = text_generator('This is a test' , do_sample=_SCREAMING_SNAKE_CASE , num_return_sequences=2 , return_tensors=_SCREAMING_SNAKE_CASE )
self.assertEqual(
_SCREAMING_SNAKE_CASE , [
{'generated_token_ids': ANY(_SCREAMING_SNAKE_CASE )},
{'generated_token_ids': ANY(_SCREAMING_SNAKE_CASE )},
] , )
SCREAMING_SNAKE_CASE_ : Dict = text_generator.model.config.eos_token_id
SCREAMING_SNAKE_CASE_ : Optional[int] = '<pad>'
SCREAMING_SNAKE_CASE_ : Union[str, Any] = text_generator(
['This is a test', 'This is a second test'] , do_sample=_SCREAMING_SNAKE_CASE , num_return_sequences=2 , batch_size=2 , return_tensors=_SCREAMING_SNAKE_CASE , )
self.assertEqual(
_SCREAMING_SNAKE_CASE , [
[
{'generated_token_ids': ANY(_SCREAMING_SNAKE_CASE )},
{'generated_token_ids': ANY(_SCREAMING_SNAKE_CASE )},
],
[
{'generated_token_ids': ANY(_SCREAMING_SNAKE_CASE )},
{'generated_token_ids': ANY(_SCREAMING_SNAKE_CASE )},
],
] , )
@require_tf
def UpperCAmelCase ( self ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Dict = pipeline(task='text-generation' , model='sshleifer/tiny-ctrl' , framework='tf' )
# Using `do_sample=False` to force deterministic output
SCREAMING_SNAKE_CASE_ : List[str] = text_generator('This is a test' , do_sample=_SCREAMING_SNAKE_CASE )
self.assertEqual(
_SCREAMING_SNAKE_CASE , [
{
'generated_text': (
'This is a test FeyFeyFey(Croatis.), s.), Cannes Cannes Cannes 閲閲Cannes Cannes Cannes 攵'
' please,'
)
}
] , )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = text_generator(['This is a test', 'This is a second test'] , do_sample=_SCREAMING_SNAKE_CASE )
self.assertEqual(
_SCREAMING_SNAKE_CASE , [
[
{
'generated_text': (
'This is a test FeyFeyFey(Croatis.), s.), Cannes Cannes Cannes 閲閲Cannes Cannes Cannes 攵'
' please,'
)
}
],
[
{
'generated_text': (
'This is a second test Chieftain Chieftain prefecture prefecture prefecture Cannes Cannes'
' Cannes 閲閲Cannes Cannes Cannes 攵 please,'
)
}
],
] , )
def UpperCAmelCase ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Tuple = TextGenerationPipeline(model=_SCREAMING_SNAKE_CASE , tokenizer=_SCREAMING_SNAKE_CASE )
return text_generator, ["This is a test", "Another test"]
def UpperCAmelCase ( self ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : int = 'Hello I believe in'
SCREAMING_SNAKE_CASE_ : Optional[Any] = pipeline('text-generation' , model='hf-internal-testing/tiny-random-gpt2' )
SCREAMING_SNAKE_CASE_ : Any = text_generator(_SCREAMING_SNAKE_CASE )
self.assertEqual(
_SCREAMING_SNAKE_CASE , [{'generated_text': 'Hello I believe in fe fe fe fe fe fe fe fe fe fe fe fe'}] , )
SCREAMING_SNAKE_CASE_ : Optional[Any] = text_generator(_SCREAMING_SNAKE_CASE , stop_sequence=' fe' )
self.assertEqual(_SCREAMING_SNAKE_CASE , [{'generated_text': 'Hello I believe in fe'}] )
def UpperCAmelCase ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : str = text_generator.model
SCREAMING_SNAKE_CASE_ : Union[str, Any] = text_generator.tokenizer
SCREAMING_SNAKE_CASE_ : Dict = text_generator('This is a test' )
self.assertEqual(_SCREAMING_SNAKE_CASE , [{'generated_text': ANY(_SCREAMING_SNAKE_CASE )}] )
self.assertTrue(outputs[0]['generated_text'].startswith('This is a test' ) )
SCREAMING_SNAKE_CASE_ : Optional[int] = text_generator('This is a test' , return_full_text=_SCREAMING_SNAKE_CASE )
self.assertEqual(_SCREAMING_SNAKE_CASE , [{'generated_text': ANY(_SCREAMING_SNAKE_CASE )}] )
self.assertNotIn('This is a test' , outputs[0]['generated_text'] )
SCREAMING_SNAKE_CASE_ : Tuple = pipeline(task='text-generation' , model=_SCREAMING_SNAKE_CASE , tokenizer=_SCREAMING_SNAKE_CASE , return_full_text=_SCREAMING_SNAKE_CASE )
SCREAMING_SNAKE_CASE_ : List[str] = text_generator('This is a test' )
self.assertEqual(_SCREAMING_SNAKE_CASE , [{'generated_text': ANY(_SCREAMING_SNAKE_CASE )}] )
self.assertNotIn('This is a test' , outputs[0]['generated_text'] )
SCREAMING_SNAKE_CASE_ : Any = text_generator('This is a test' , return_full_text=_SCREAMING_SNAKE_CASE )
self.assertEqual(_SCREAMING_SNAKE_CASE , [{'generated_text': ANY(_SCREAMING_SNAKE_CASE )}] )
self.assertTrue(outputs[0]['generated_text'].startswith('This is a test' ) )
SCREAMING_SNAKE_CASE_ : str = text_generator(['This is great !', 'Something else'] , num_return_sequences=2 , do_sample=_SCREAMING_SNAKE_CASE )
self.assertEqual(
_SCREAMING_SNAKE_CASE , [
[{'generated_text': ANY(_SCREAMING_SNAKE_CASE )}, {'generated_text': ANY(_SCREAMING_SNAKE_CASE )}],
[{'generated_text': ANY(_SCREAMING_SNAKE_CASE )}, {'generated_text': ANY(_SCREAMING_SNAKE_CASE )}],
] , )
if text_generator.tokenizer.pad_token is not None:
SCREAMING_SNAKE_CASE_ : int = text_generator(
['This is great !', 'Something else'] , num_return_sequences=2 , batch_size=2 , do_sample=_SCREAMING_SNAKE_CASE )
self.assertEqual(
_SCREAMING_SNAKE_CASE , [
[{'generated_text': ANY(_SCREAMING_SNAKE_CASE )}, {'generated_text': ANY(_SCREAMING_SNAKE_CASE )}],
[{'generated_text': ANY(_SCREAMING_SNAKE_CASE )}, {'generated_text': ANY(_SCREAMING_SNAKE_CASE )}],
] , )
with self.assertRaises(_SCREAMING_SNAKE_CASE ):
SCREAMING_SNAKE_CASE_ : List[Any] = text_generator('test' , return_full_text=_SCREAMING_SNAKE_CASE , return_text=_SCREAMING_SNAKE_CASE )
with self.assertRaises(_SCREAMING_SNAKE_CASE ):
SCREAMING_SNAKE_CASE_ : str = text_generator('test' , return_full_text=_SCREAMING_SNAKE_CASE , return_tensors=_SCREAMING_SNAKE_CASE )
with self.assertRaises(_SCREAMING_SNAKE_CASE ):
SCREAMING_SNAKE_CASE_ : Optional[Any] = text_generator('test' , return_text=_SCREAMING_SNAKE_CASE , return_tensors=_SCREAMING_SNAKE_CASE )
        # Empty prompt is slightly special
# it requires BOS token to exist.
# Special case for Pegasus which will always append EOS so will
# work even without BOS.
if (
text_generator.tokenizer.bos_token_id is not None
or "Pegasus" in tokenizer.__class__.__name__
or "Git" in model.__class__.__name__
):
SCREAMING_SNAKE_CASE_ : Optional[Any] = text_generator('' )
self.assertEqual(_SCREAMING_SNAKE_CASE , [{'generated_text': ANY(_SCREAMING_SNAKE_CASE )}] )
else:
with self.assertRaises((ValueError, AssertionError) ):
SCREAMING_SNAKE_CASE_ : Dict = text_generator('' )
if text_generator.framework == "tf":
# TF generation does not support max_new_tokens, and it's impossible
# to control long generation with only max_length without
# fancy calculation, dismissing tests for now.
return
# We don't care about infinite range models.
# They already work.
# Skip this test for XGLM, since it uses sinusoidal positional embeddings which are resized on-the-fly.
SCREAMING_SNAKE_CASE_ : List[Any] = ['RwkvForCausalLM', 'XGLMForCausalLM', 'GPTNeoXForCausalLM']
if (
tokenizer.model_max_length < 1_0000
and text_generator.model.__class__.__name__ not in EXTRA_MODELS_CAN_HANDLE_LONG_INPUTS
):
# Handling of large generations
with self.assertRaises((RuntimeError, IndexError, ValueError, AssertionError) ):
text_generator('This is a test' * 500 , max_new_tokens=20 )
SCREAMING_SNAKE_CASE_ : List[str] = text_generator('This is a test' * 500 , handle_long_generation='hole' , max_new_tokens=20 )
# Hole strategy cannot work
with self.assertRaises(_SCREAMING_SNAKE_CASE ):
text_generator(
'This is a test' * 500 , handle_long_generation='hole' , max_new_tokens=tokenizer.model_max_length + 10 , )
@require_torch
@require_accelerate
@require_torch_gpu
def UpperCAmelCase ( self ):
"""simple docstring"""
import torch
# Classic `model_kwargs`
SCREAMING_SNAKE_CASE_ : Optional[int] = pipeline(
            model='hf-internal-testing/tiny-random-bloom' , model_kwargs={'device_map': 'auto', 'torch_dtype': torch.bfloat16} , )
self.assertEqual(pipe.model.device , torch.device(0 ) )
        self.assertEqual(pipe.model.lm_head.weight.dtype , torch.bfloat16 )
SCREAMING_SNAKE_CASE_ : int = pipe('This is a test' )
self.assertEqual(
_SCREAMING_SNAKE_CASE , [
{
'generated_text': (
'This is a test test test test test test test test test test test test test test test test'
' test'
)
}
] , )
        # Upgraded those two to real pipeline arguments (they just get sent to the model as they're unlikely to mean anything else.)
        SCREAMING_SNAKE_CASE_ : List[Any] = pipeline(model='hf-internal-testing/tiny-random-bloom' , device_map='auto' , torch_dtype=torch.bfloat16 )
self.assertEqual(pipe.model.device , torch.device(0 ) )
        self.assertEqual(pipe.model.lm_head.weight.dtype , torch.bfloat16 )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = pipe('This is a test' )
self.assertEqual(
_SCREAMING_SNAKE_CASE , [
{
'generated_text': (
'This is a test test test test test test test test test test test test test test test test'
' test'
)
}
] , )
# torch_dtype will be automatically set to float32 if not provided - check: https://github.com/huggingface/transformers/pull/20602
SCREAMING_SNAKE_CASE_ : List[Any] = pipeline(model='hf-internal-testing/tiny-random-bloom' , device_map='auto' )
self.assertEqual(pipe.model.device , torch.device(0 ) )
        self.assertEqual(pipe.model.lm_head.weight.dtype , torch.float32 )
SCREAMING_SNAKE_CASE_ : Any = pipe('This is a test' )
self.assertEqual(
_SCREAMING_SNAKE_CASE , [
{
'generated_text': (
'This is a test test test test test test test test test test test test test test test test'
' test'
)
}
] , )
@require_torch
@require_torch_gpu
def UpperCAmelCase ( self ):
"""simple docstring"""
import torch
        SCREAMING_SNAKE_CASE_ : List[Any] = pipeline(model='hf-internal-testing/tiny-random-bloom' , device=0 , torch_dtype=torch.float16 )
pipe('This is a test' )
@require_torch
@require_accelerate
@require_torch_gpu
def UpperCAmelCase ( self ):
"""simple docstring"""
import torch
        SCREAMING_SNAKE_CASE_ : Tuple = pipeline(model='hf-internal-testing/tiny-random-bloom' , device_map='auto' , torch_dtype=torch.float16 )
pipe('This is a test' , do_sample=_SCREAMING_SNAKE_CASE , top_p=0.5 )
def UpperCAmelCase ( self ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : List[Any] = 'Hello world'
SCREAMING_SNAKE_CASE_ : int = pipeline('text-generation' , model='hf-internal-testing/tiny-random-gpt2' )
if text_generator.model.framework == "tf":
SCREAMING_SNAKE_CASE_ : Optional[Any] = logging.get_logger('transformers.generation.tf_utils' )
else:
SCREAMING_SNAKE_CASE_ : Dict = logging.get_logger('transformers.generation.utils' )
        SCREAMING_SNAKE_CASE_ : List[Any] = 'Both `max_new_tokens`' # The beginning of the message to be checked in this test
# Both are set by the user -> log warning
with CaptureLogger(_SCREAMING_SNAKE_CASE ) as cl:
SCREAMING_SNAKE_CASE_ : str = text_generator(_SCREAMING_SNAKE_CASE , max_length=10 , max_new_tokens=1 )
self.assertIn(_SCREAMING_SNAKE_CASE , cl.out )
# The user only sets one -> no warning
with CaptureLogger(_SCREAMING_SNAKE_CASE ) as cl:
SCREAMING_SNAKE_CASE_ : str = text_generator(_SCREAMING_SNAKE_CASE , max_new_tokens=1 )
self.assertNotIn(_SCREAMING_SNAKE_CASE , cl.out )
with CaptureLogger(_SCREAMING_SNAKE_CASE ) as cl:
SCREAMING_SNAKE_CASE_ : int = text_generator(_SCREAMING_SNAKE_CASE , max_length=10 )
self.assertNotIn(_SCREAMING_SNAKE_CASE , cl.out )
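# Editorial note on the warning test above: `max_length` counts prompt plus generated tokens while
# `max_new_tokens` counts only the newly generated ones; transformers warns (the "Both `max_new_tokens`"
# message) when both are supplied, and `max_new_tokens` takes precedence.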
| 253 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
lowerCAmelCase : Dict = {
'configuration_layoutlmv2': ['LAYOUTLMV2_PRETRAINED_CONFIG_ARCHIVE_MAP', 'LayoutLMv2Config'],
'processing_layoutlmv2': ['LayoutLMv2Processor'],
'tokenization_layoutlmv2': ['LayoutLMv2Tokenizer'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase : Optional[Any] = ['LayoutLMv2TokenizerFast']
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase : Any = ['LayoutLMv2FeatureExtractor']
lowerCAmelCase : int = ['LayoutLMv2ImageProcessor']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase : Optional[int] = [
'LAYOUTLMV2_PRETRAINED_MODEL_ARCHIVE_LIST',
'LayoutLMv2ForQuestionAnswering',
'LayoutLMv2ForSequenceClassification',
'LayoutLMv2ForTokenClassification',
'LayoutLMv2Layer',
'LayoutLMv2Model',
'LayoutLMv2PreTrainedModel',
]
if TYPE_CHECKING:
    from .configuration_layoutlmv2 import LAYOUTLMV2_PRETRAINED_CONFIG_ARCHIVE_MAP, LayoutLMv2Config
    from .processing_layoutlmv2 import LayoutLMv2Processor
    from .tokenization_layoutlmv2 import LayoutLMv2Tokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .tokenization_layoutlmv2_fast import LayoutLMv2TokenizerFast
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .feature_extraction_layoutlmv2 import LayoutLMv2FeatureExtractor, LayoutLMv2ImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_layoutlmv2 import (
            LAYOUTLMV2_PRETRAINED_MODEL_ARCHIVE_LIST,
            LayoutLMv2ForQuestionAnswering,
            LayoutLMv2ForSequenceClassification,
            LayoutLMv2ForTokenClassification,
            LayoutLMv2Layer,
            LayoutLMv2Model,
            LayoutLMv2PreTrainedModel,
)
else:
import sys
lowerCAmelCase : Optional[int] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
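# Editorial note: `_LazyModule` defers the heavy submodule imports declared in `_import_structure` until an
# attribute is first accessed (e.g. `from transformers.models.layoutlmv2 import LayoutLMv2Config` only imports
# the configuration module at that point); the TYPE_CHECKING branch above exists solely for static type checkers.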
| 253 | 1 |
"""simple docstring"""
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import GLPNImageProcessor
class __a ( unittest.TestCase ):
"""simple docstring"""
def __init__( self : Optional[Any] , lowercase_ : Union[str, Any] , lowercase_ : Dict=7 , lowercase_ : int=3 , lowercase_ : str=18 , lowercase_ : Tuple=30 , lowercase_ : Tuple=400 , lowercase_ : List[str]=True , lowercase_ : List[str]=32 , lowercase_ : Optional[int]=True , ):
UpperCamelCase__ : Dict =parent
UpperCamelCase__ : Dict =batch_size
UpperCamelCase__ : List[str] =num_channels
UpperCamelCase__ : Optional[int] =image_size
UpperCamelCase__ : Optional[int] =min_resolution
UpperCamelCase__ : Union[str, Any] =max_resolution
UpperCamelCase__ : List[str] =do_resize
UpperCamelCase__ : Union[str, Any] =size_divisor
UpperCamelCase__ : Union[str, Any] =do_rescale
def _lowerCAmelCase ( self : List[Any] ):
return {
"do_resize": self.do_resize,
"size_divisor": self.size_divisor,
"do_rescale": self.do_rescale,
}
@require_torch
@require_vision
class __a ( snake_case__, unittest.TestCase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = GLPNImageProcessor if is_vision_available() else None
def _lowerCAmelCase ( self : Optional[int] ):
UpperCamelCase__ : int =GLPNImageProcessingTester(self )
@property
def _lowerCAmelCase ( self : int ):
return self.image_processor_tester.prepare_image_processor_dict()
def _lowerCAmelCase ( self : Tuple ):
UpperCamelCase__ : str =self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(lowercase_ , '''do_resize''' ) )
self.assertTrue(hasattr(lowercase_ , '''size_divisor''' ) )
self.assertTrue(hasattr(lowercase_ , '''resample''' ) )
self.assertTrue(hasattr(lowercase_ , '''do_rescale''' ) )
def _lowerCAmelCase ( self : Optional[int] ):
pass
def _lowerCAmelCase ( self : Any ):
# Initialize image_processing
UpperCamelCase__ : Optional[int] =self.image_processing_class(**self.image_processor_dict )
# create random PIL images
UpperCamelCase__ : int =prepare_image_inputs(self.image_processor_tester , equal_resolution=lowercase_ )
for image in image_inputs:
self.assertIsInstance(lowercase_ , Image.Image )
# Test not batched input (GLPNImageProcessor doesn't support batching)
UpperCamelCase__ : str =image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
self.assertTrue(encoded_images.shape[-1] % self.image_processor_tester.size_divisor == 0 )
self.assertTrue(encoded_images.shape[-2] % self.image_processor_tester.size_divisor == 0 )
def _lowerCAmelCase ( self : Dict ):
# Initialize image_processing
UpperCamelCase__ : List[Any] =self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
UpperCamelCase__ : str =prepare_image_inputs(self.image_processor_tester , equal_resolution=lowercase_ , numpify=lowercase_ )
for image in image_inputs:
self.assertIsInstance(lowercase_ , np.ndarray )
# Test not batched input (GLPNImageProcessor doesn't support batching)
UpperCamelCase__ : Optional[Any] =image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
self.assertTrue(encoded_images.shape[-1] % self.image_processor_tester.size_divisor == 0 )
self.assertTrue(encoded_images.shape[-2] % self.image_processor_tester.size_divisor == 0 )
def _lowerCAmelCase ( self : Union[str, Any] ):
# Initialize image_processing
UpperCamelCase__ : List[Any] =self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
UpperCamelCase__ : Any =prepare_image_inputs(self.image_processor_tester , equal_resolution=lowercase_ , torchify=lowercase_ )
for image in image_inputs:
self.assertIsInstance(lowercase_ , torch.Tensor )
# Test not batched input (GLPNImageProcessor doesn't support batching)
UpperCamelCase__ : str =image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
self.assertTrue(encoded_images.shape[-1] % self.image_processor_tester.size_divisor == 0 )
self.assertTrue(encoded_images.shape[-2] % self.image_processor_tester.size_divisor == 0 )
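# Editorial note: the invariant asserted in the three tests above is that GLPNImageProcessor resizes each
# image so that its height and width are multiples of `size_divisor` (32 in this tester), regardless of
# whether the input arrives as a PIL image, a NumPy array, or a PyTorch tensor.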
| 157 |
"""simple docstring"""
# this script reports modified .py files under the desired list of top-level sub-dirs passed as a list of arguments, e.g.:
# python ./utils/get_modified_files.py utils src tests examples
#
# it uses git to find the forking point and which files were modified - i.e. files not under git won't be considered
# since the output of this script is fed into Makefile commands it doesn't print a newline after the results
import re
import subprocess
import sys
_SCREAMING_SNAKE_CASE : Tuple = subprocess.check_output("""git merge-base main HEAD""".split()).decode("""utf-8""")
_SCREAMING_SNAKE_CASE : List[Any] = (
subprocess.check_output(F'''git diff --diff-filter=d --name-only {fork_point_sha}'''.split()).decode("""utf-8""").split()
)
_SCREAMING_SNAKE_CASE : Tuple = """|""".join(sys.argv[1:])
_SCREAMING_SNAKE_CASE : Union[str, Any] = re.compile(rF'''^({joined_dirs}).*?\.py$''')
_SCREAMING_SNAKE_CASE : str = [x for x in modified_files if regex.match(x)]
print(""" """.join(relevant_modified_files), end="""""")
| 157 | 1 |
"""simple docstring"""
import functools
from typing import Any
def lowercase ( _snake_case : str , _snake_case : list[str] ) -> bool:
"""simple docstring"""
if not isinstance(_snake_case , _snake_case ) or len(_snake_case ) == 0:
        raise ValueError('''the string should be a non-empty string''' )
if not isinstance(_snake_case , _snake_case ) or not all(
isinstance(_snake_case , _snake_case ) and len(_snake_case ) > 0 for item in words ):
raise ValueError('''the words should be a list of non-empty strings''' )
# Build trie
__snake_case : dict[str, Any] = {}
__snake_case : int = '''WORD_KEEPER'''
for word in words:
__snake_case : List[Any] = trie
for c in word:
if c not in trie_node:
__snake_case : Union[str, Any] = {}
__snake_case : List[str] = trie_node[c]
__snake_case : List[str] = True
__snake_case : Dict = len(_snake_case )
# Dynamic programming method
@functools.cache
def is_breakable(_snake_case : int ) -> bool:
if index == len_string:
return True
__snake_case : List[str] = trie
for i in range(_snake_case , _snake_case ):
__snake_case : Dict = trie_node.get(string[i] , _snake_case )
if trie_node is None:
return False
if trie_node.get(_snake_case , _snake_case ) and is_breakable(i + 1 ):
return True
return False
return is_breakable(0 )
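# Example (editorial addition): for the string "applepenapple" with words ["apple", "pen"], the trie walk
# matches "apple" up to index 5, "pen" up to index 8 and "apple" again up to index 13 == len(string), so the
# function returns True; "catsandog" with ["cats", "dog", "sand", "and", "cat"] returns False.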
if __name__ == "__main__":
import doctest
doctest.testmod()
| 102 |
import logging
import os
from dataclasses import dataclass
from typing import List, Optional, Union
import tqdm
from filelock import FileLock
from transformers import (
BartTokenizer,
BartTokenizerFast,
DataProcessor,
PreTrainedTokenizer,
RobertaTokenizer,
RobertaTokenizerFast,
XLMRobertaTokenizer,
is_tf_available,
is_torch_available,
)
snake_case_ = logging.getLogger(__name__)
@dataclass(frozen=_UpperCAmelCase )
class SCREAMING_SNAKE_CASE__ :
A_ : str
A_ : str
A_ : Optional[str] = None
A_ : Optional[str] = None
A_ : Optional[str] = None
@dataclass(frozen=_UpperCAmelCase )
class SCREAMING_SNAKE_CASE__ :
A_ : List[int]
A_ : Optional[List[int]] = None
A_ : Optional[List[int]] = None
A_ : Optional[Union[int, float]] = None
A_ : Optional[int] = None
if is_torch_available():
import torch
from torch.utils.data import Dataset
class SCREAMING_SNAKE_CASE__ ( _UpperCAmelCase ):
A_ : List[InputFeatures]
def __init__(self : int , a__ : str , a__ : PreTrainedTokenizer , a__ : str , a__ : Optional[int] = None , a__ : List[Any]=False , a__ : bool = False , ):
"""simple docstring"""
__snake_case = hans_processors[task]()
__snake_case = os.path.join(
a__ , '''cached_{}_{}_{}_{}'''.format(
'''dev''' if evaluate else '''train''' , tokenizer.__class__.__name__ , str(a__ ) , a__ , ) , )
__snake_case = processor.get_labels()
if tokenizer.__class__ in (
RobertaTokenizer,
RobertaTokenizerFast,
XLMRobertaTokenizer,
BartTokenizer,
BartTokenizerFast,
):
# HACK(label indices are swapped in RoBERTa pretrained model)
__snake_case , __snake_case = label_list[2], label_list[1]
__snake_case = label_list
# Make sure only the first process in distributed training processes the dataset,
# and the others will use the cache.
__snake_case = cached_features_file + '''.lock'''
with FileLock(a__ ):
if os.path.exists(a__ ) and not overwrite_cache:
logger.info(f"""Loading features from cached file {cached_features_file}""" )
__snake_case = torch.load(a__ )
else:
logger.info(f"""Creating features from dataset file at {data_dir}""" )
__snake_case = (
processor.get_dev_examples(a__ ) if evaluate else processor.get_train_examples(a__ )
)
logger.info('''Training examples: %s''' , len(a__ ) )
__snake_case = hans_convert_examples_to_features(a__ , a__ , a__ , a__ )
logger.info('''Saving features into cached file %s''' , a__ )
torch.save(self.features , a__ )
def __len__(self : int ):
"""simple docstring"""
return len(self.features )
def __getitem__(self : Dict , a__ : List[Any] ):
"""simple docstring"""
return self.features[i]
def a (self : List[Any] ):
"""simple docstring"""
return self.label_list
if is_tf_available():
import tensorflow as tf
class SCREAMING_SNAKE_CASE__ :
A_ : List[InputFeatures]
def __init__(self : Tuple , a__ : str , a__ : PreTrainedTokenizer , a__ : str , a__ : Optional[int] = 128 , a__ : Any=False , a__ : bool = False , ):
"""simple docstring"""
__snake_case = hans_processors[task]()
__snake_case = processor.get_labels()
if tokenizer.__class__ in (
RobertaTokenizer,
RobertaTokenizerFast,
XLMRobertaTokenizer,
BartTokenizer,
BartTokenizerFast,
):
# HACK(label indices are swapped in RoBERTa pretrained model)
__snake_case , __snake_case = label_list[2], label_list[1]
__snake_case = label_list
__snake_case = processor.get_dev_examples(a__ ) if evaluate else processor.get_train_examples(a__ )
__snake_case = hans_convert_examples_to_features(a__ , a__ , a__ , a__ )
def gen():
for ex_index, ex in tqdm.tqdm(enumerate(self.features ) , desc='''convert examples to features''' ):
if ex_index % 1_0000 == 0:
logger.info('''Writing example %d of %d''' % (ex_index, len(a__ )) )
yield (
{
"example_id": 0,
"input_ids": ex.input_ids,
"attention_mask": ex.attention_mask,
"token_type_ids": ex.token_type_ids,
},
ex.label,
)
__snake_case = tf.data.Dataset.from_generator(
a__ , (
{
                        '''example_id''': tf.int32,
                        '''input_ids''': tf.int32,
                        '''attention_mask''': tf.int32,
                        '''token_type_ids''': tf.int32,
                    },
                    tf.int64,
) , (
{
'''example_id''': tf.TensorShape([] ),
'''input_ids''': tf.TensorShape([None, None] ),
'''attention_mask''': tf.TensorShape([None, None] ),
'''token_type_ids''': tf.TensorShape([None, None] ),
},
tf.TensorShape([] ),
) , )
def a (self : Union[str, Any] ):
"""simple docstring"""
return self.dataset
def __len__(self : Dict ):
"""simple docstring"""
return len(self.features )
def __getitem__(self : Any , a__ : Dict ):
"""simple docstring"""
return self.features[i]
def a (self : str ):
"""simple docstring"""
return self.label_list
class SCREAMING_SNAKE_CASE__ ( _UpperCAmelCase ):
def a (self : Dict , a__ : Dict ):
"""simple docstring"""
return self._create_examples(self._read_tsv(os.path.join(a__ , '''heuristics_train_set.txt''' ) ) , '''train''' )
def a (self : Optional[int] , a__ : Tuple ):
"""simple docstring"""
return self._create_examples(self._read_tsv(os.path.join(a__ , '''heuristics_evaluation_set.txt''' ) ) , '''dev''' )
def a (self : int ):
"""simple docstring"""
return ["contradiction", "entailment", "neutral"]
def a (self : Any , a__ : Optional[int] , a__ : List[Any] ):
"""simple docstring"""
__snake_case = []
for i, line in enumerate(a__ ):
if i == 0:
continue
__snake_case = '''%s-%s''' % (set_type, line[0])
__snake_case = line[5]
__snake_case = line[6]
__snake_case = line[7][2:] if line[7].startswith('''ex''' ) else line[7]
__snake_case = line[0]
examples.append(InputExample(guid=a__ , text_a=a__ , text_b=a__ , label=a__ , pairID=a__ ) )
return examples
def lowerCamelCase__ ( snake_case_ : List[InputExample] , snake_case_ : List[str] , snake_case_ : int , snake_case_ : PreTrainedTokenizer , ) -> List[str]:
__snake_case = {label: i for i, label in enumerate(snake_case_ )}
__snake_case = []
for ex_index, example in tqdm.tqdm(enumerate(snake_case_ ) , desc='''convert examples to features''' ):
if ex_index % 1_0000 == 0:
logger.info('''Writing example %d''' % (ex_index) )
__snake_case = tokenizer(
example.text_a , example.text_b , add_special_tokens=snake_case_ , max_length=snake_case_ , padding='''max_length''' , truncation=snake_case_ , return_overflowing_tokens=snake_case_ , )
__snake_case = label_map[example.label] if example.label in label_map else 0
__snake_case = int(example.pairID )
features.append(InputFeatures(**snake_case_ , label=snake_case_ , pairID=snake_case_ ) )
for i, example in enumerate(examples[:5] ):
logger.info('''*** Example ***''' )
logger.info(f"""guid: {example}""" )
logger.info(f"""features: {features[i]}""" )
return features
snake_case_ = {
'hans': 3,
}
snake_case_ = {
'hans': HansProcessor,
}
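# Minimal usage sketch (editorial addition): the PyTorch dataset class defined above is published as
# `HansDataset`; arguments follow its __init__ order (data_dir, tokenizer, task, max_seq_length):
#   from transformers import AutoTokenizer
#   tokenizer = AutoTokenizer.from_pretrained('bert-base-uncased')
#   dataset = HansDataset('/path/to/hans', tokenizer, 'hans', 128)
# '/path/to/hans' is a placeholder for a directory containing heuristics_train_set.txt /
# heuristics_evaluation_set.txt.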
| 24 | 0 |
import argparse
import json
import logging
import os
import shutil
import sys
import tempfile
import unittest
from unittest import mock
import torch
from accelerate.utils import write_basic_config
from transformers.testing_utils import TestCasePlus, get_gpu_count, run_command, slow, torch_device
from transformers.utils import is_apex_available
logging.basicConfig(level=logging.DEBUG)
SCREAMING_SNAKE_CASE : Any = logging.getLogger()
def get_setup_file() -> List[str]:
    parser = argparse.ArgumentParser()
    parser.add_argument('-f' )
    args = parser.parse_args()
return args.f
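# The `no_trainer` example scripts write their final metrics to `all_results.json`
# in the output directory; the helper below reads that file back for the assertions.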
def get_results(output_dir ) -> Dict:
    results = {}
    path = os.path.join(output_dir , 'all_results.json' )
    if os.path.exists(path ):
        with open(path , 'r' ) as f:
            results = json.load(f )
    else:
        raise ValueError(F'''can't find {path}''' )
return results
def is_cuda_and_apex_available() -> Optional[Any]:
    is_using_cuda = torch.cuda.is_available() and torch_device == 'cuda'
    return is_using_cuda and is_apex_available()
stream_handler = logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)
class ExamplesTestsNoTrainer( TestCasePlus ):
@classmethod
    def setUpClass(cls) -> Optional[Any]:
"""simple docstring"""
        cls.tmpdir = tempfile.mkdtemp()
        cls.configPath = os.path.join(cls.tmpdir, 'default_config.yml')
        write_basic_config(save_location=cls.configPath)
        cls._launch_args = ['accelerate', 'launch', '--config_file', cls.configPath]
@classmethod
    def tearDownClass(cls) -> Any:
"""simple docstring"""
shutil.rmtree(cls.tmpdir)
@mock.patch.dict(os.environ, {'WANDB_MODE': 'offline'})
    def test_run_glue_no_trainer(self) -> int:
        """simple docstring"""
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = F'''
{self.examples_dir}/pytorch/text-classification/run_glue_no_trainer.py
--model_name_or_path distilbert-base-uncased
--output_dir {tmp_dir}
--train_file ./tests/fixtures/tests_samples/MRPC/train.csv
--validation_file ./tests/fixtures/tests_samples/MRPC/dev.csv
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
--learning_rate=1e-4
--seed=42
--checkpointing_steps epoch
--with_tracking
'''.split()
if is_cuda_and_apex_available():
testargs.append('--fp16')
run_command(self._launch_args + testargs)
        result = get_results(tmp_dir)
        self.assertGreaterEqual(result['eval_accuracy'], 0.75)
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, 'epoch_0')))
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, 'glue_no_trainer')))
@mock.patch.dict(os.environ, {'WANDB_MODE': 'offline'})
    def test_run_clm_no_trainer(self) -> Optional[Any]:
        """simple docstring"""
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = F'''
{self.examples_dir}/pytorch/language-modeling/run_clm_no_trainer.py
--model_name_or_path distilgpt2
--train_file ./tests/fixtures/sample_text.txt
--validation_file ./tests/fixtures/sample_text.txt
--block_size 128
--per_device_train_batch_size 5
--per_device_eval_batch_size 5
--num_train_epochs 2
--output_dir {tmp_dir}
--checkpointing_steps epoch
--with_tracking
'''.split()
if torch.cuda.device_count() > 1:
# Skipping because there are not enough batches to train the model + would need a drop_last to work.
return
run_command(self._launch_args + testargs)
        result = get_results(tmp_dir)
        self.assertLess(result['perplexity'], 100)
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, 'epoch_0')))
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, 'clm_no_trainer')))
@mock.patch.dict(os.environ, {'WANDB_MODE': 'offline'})
    def test_run_mlm_no_trainer(self) -> Tuple:
        """simple docstring"""
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = F'''
{self.examples_dir}/pytorch/language-modeling/run_mlm_no_trainer.py
--model_name_or_path distilroberta-base
--train_file ./tests/fixtures/sample_text.txt
--validation_file ./tests/fixtures/sample_text.txt
--output_dir {tmp_dir}
--num_train_epochs=1
--checkpointing_steps epoch
--with_tracking
'''.split()
run_command(self._launch_args + testargs)
        result = get_results(tmp_dir)
        self.assertLess(result['perplexity'], 42)
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, 'epoch_0')))
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, 'mlm_no_trainer')))
@mock.patch.dict(os.environ, {'WANDB_MODE': 'offline'})
    def test_run_ner_no_trainer(self) -> Tuple:
        """simple docstring"""
        epochs = 7 if get_gpu_count() > 1 else 2
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = F'''
{self.examples_dir}/pytorch/token-classification/run_ner_no_trainer.py
--model_name_or_path bert-base-uncased
--train_file tests/fixtures/tests_samples/conll/sample.json
--validation_file tests/fixtures/tests_samples/conll/sample.json
--output_dir {tmp_dir}
--learning_rate=2e-4
--per_device_train_batch_size=2
--per_device_eval_batch_size=2
--num_train_epochs={epochs}
--seed 7
--checkpointing_steps epoch
--with_tracking
'''.split()
run_command(self._launch_args + testargs)
        result = get_results(tmp_dir)
        self.assertGreaterEqual(result['eval_accuracy'], 0.75)
        self.assertLess(result['train_loss'], 0.5)
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, 'epoch_0')))
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, 'ner_no_trainer')))
@unittest.skip(reason='Fix me @muellerzr')
@mock.patch.dict(os.environ, {'WANDB_MODE': 'offline'})
    def test_run_squad_no_trainer(self) -> List[Any]:
        """simple docstring"""
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = F'''
{self.examples_dir}/pytorch/question-answering/run_qa_no_trainer.py
--model_name_or_path bert-base-uncased
--version_2_with_negative
--train_file tests/fixtures/tests_samples/SQUAD/sample.json
--validation_file tests/fixtures/tests_samples/SQUAD/sample.json
--output_dir {tmp_dir}
--seed=42
--max_train_steps=10
--num_warmup_steps=2
--learning_rate=2e-4
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
--checkpointing_steps epoch
--with_tracking
'''.split()
run_command(self._launch_args + testargs)
        result = get_results(tmp_dir)
# Because we use --version_2_with_negative the testing script uses SQuAD v2 metrics.
self.assertGreaterEqual(result['eval_f1'], 28)
self.assertGreaterEqual(result['eval_exact'], 28)
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, 'epoch_0')))
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, 'qa_no_trainer')))
@mock.patch.dict(os.environ, {'WANDB_MODE': 'offline'})
    def test_run_swag_no_trainer(self) -> Optional[int]:
        """simple docstring"""
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = F'''
{self.examples_dir}/pytorch/multiple-choice/run_swag_no_trainer.py
--model_name_or_path bert-base-uncased
--train_file tests/fixtures/tests_samples/swag/sample.json
--validation_file tests/fixtures/tests_samples/swag/sample.json
--output_dir {tmp_dir}
--max_train_steps=20
--num_warmup_steps=2
--learning_rate=2e-4
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
--with_tracking
'''.split()
run_command(self._launch_args + testargs)
        result = get_results(tmp_dir)
        self.assertGreaterEqual(result['eval_accuracy'], 0.8)
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, 'swag_no_trainer')))
@slow
@mock.patch.dict(os.environ, {'WANDB_MODE': 'offline'})
    def test_run_summarization_no_trainer(self) -> Union[str, Any]:
        """simple docstring"""
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = F'''
{self.examples_dir}/pytorch/summarization/run_summarization_no_trainer.py
--model_name_or_path t5-small
--train_file tests/fixtures/tests_samples/xsum/sample.json
--validation_file tests/fixtures/tests_samples/xsum/sample.json
--output_dir {tmp_dir}
--max_train_steps=50
--num_warmup_steps=8
--learning_rate=2e-4
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
--checkpointing_steps epoch
--with_tracking
'''.split()
run_command(self._launch_args + testargs)
        result = get_results(tmp_dir)
self.assertGreaterEqual(result['eval_rouge1'], 10)
self.assertGreaterEqual(result['eval_rouge2'], 2)
self.assertGreaterEqual(result['eval_rougeL'], 7)
self.assertGreaterEqual(result['eval_rougeLsum'], 7)
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, 'epoch_0')))
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, 'summarization_no_trainer')))
@slow
@mock.patch.dict(os.environ, {'WANDB_MODE': 'offline'})
    def test_run_translation_no_trainer(self) -> Dict:
        """simple docstring"""
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = F'''
{self.examples_dir}/pytorch/translation/run_translation_no_trainer.py
--model_name_or_path sshleifer/student_marian_en_ro_6_1
--source_lang en
--target_lang ro
--train_file tests/fixtures/tests_samples/wmt16/sample.json
--validation_file tests/fixtures/tests_samples/wmt16/sample.json
--output_dir {tmp_dir}
--max_train_steps=50
--num_warmup_steps=8
--num_beams=6
--learning_rate=3e-3
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
--source_lang en_XX
--target_lang ro_RO
--checkpointing_steps epoch
--with_tracking
'''.split()
run_command(self._launch_args + testargs)
        result = get_results(tmp_dir)
        self.assertGreaterEqual(result['eval_bleu'], 30)
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, 'epoch_0')))
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, 'translation_no_trainer')))
@slow
    def test_run_semantic_segmentation_no_trainer(self) -> List[Any]:
        """simple docstring"""
        stream_handler = logging.StreamHandler(sys.stdout)
        logger.addHandler(stream_handler)
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = F'''
{self.examples_dir}/pytorch/semantic-segmentation/run_semantic_segmentation_no_trainer.py
--dataset_name huggingface/semantic-segmentation-test-sample
--output_dir {tmp_dir}
--max_train_steps=10
--num_warmup_steps=2
--learning_rate=2e-4
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
--checkpointing_steps epoch
'''.split()
run_command(self._launch_args + testargs)
        result = get_results(tmp_dir)
        self.assertGreaterEqual(result['eval_overall_accuracy'], 0.10)
@mock.patch.dict(os.environ, {'WANDB_MODE': 'offline'})
    def test_run_image_classification_no_trainer(self) -> Dict:
        """simple docstring"""
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = F'''
{self.examples_dir}/pytorch/image-classification/run_image_classification_no_trainer.py
--model_name_or_path google/vit-base-patch16-224-in21k
--dataset_name hf-internal-testing/cats_vs_dogs_sample
--learning_rate 1e-4
--per_device_train_batch_size 2
--per_device_eval_batch_size 1
--max_train_steps 2
--train_val_split 0.1
--seed 42
--output_dir {tmp_dir}
--with_tracking
--checkpointing_steps 1
'''.split()
if is_cuda_and_apex_available():
testargs.append('--fp16')
run_command(self._launch_args + testargs)
        result = get_results(tmp_dir)
        # The base model scores a 25%
        self.assertGreaterEqual(result['eval_accuracy'], 0.6)
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, 'step_1')))
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, 'image_classification_no_trainer')))
| 84 |
import argparse
import logging
import sys
from unittest.mock import patch
import run_glue_deebert
from transformers.testing_utils import TestCasePlus, get_gpu_count, require_torch_non_multi_gpu, slow
logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger()
def get_setup_file() -> Any:
    parser = argparse.ArgumentParser()
    parser.add_argument('-f' )
    args = parser.parse_args()
return args.f
class DeeBertTests( TestCasePlus ):
    def setUp( self) -> None:
"""simple docstring"""
        stream_handler = logging.StreamHandler(sys.stdout)
        logger.addHandler(stream_handler)
    def run_and_check( self, args) -> List[str]:
        """simple docstring"""
        n_gpu = get_gpu_count()
        if n_gpu > 1:
            pass
            # XXX: doesn't quite work with n_gpu > 1 https://github.com/huggingface/transformers/issues/10560
            # script = f"{self.examples_dir_str}/research_projects/deebert/run_glue_deebert.py"
            # distributed_args = f"-m torch.distributed.launch --nproc_per_node={n_gpu} {script}".split()
            # cmd = [sys.executable] + distributed_args + args
            # execute_subprocess_async(cmd, env=self.get_env())
            # XXX: test the results - need to save them first into .json file
        else:
            args.insert(0, 'run_glue_deebert.py')
            with patch.object(sys, 'argv', args):
                result = run_glue_deebert.main()
                for value in result.values():
                    self.assertGreaterEqual(value, 0.666)
@slow
@require_torch_non_multi_gpu
    def test_glue_deebert_train( self) -> Tuple:
"""simple docstring"""
        train_args = '\n --model_type roberta\n --model_name_or_path roberta-base\n --task_name MRPC\n --do_train\n --do_eval\n --do_lower_case\n --data_dir ./tests/fixtures/tests_samples/MRPC/\n --max_seq_length 128\n --per_gpu_eval_batch_size=1\n --per_gpu_train_batch_size=8\n --learning_rate 2e-4\n --num_train_epochs 3\n --overwrite_output_dir\n --seed 42\n --output_dir ./examples/deebert/saved_models/roberta-base/MRPC/two_stage\n --plot_data_dir ./examples/deebert/results/\n --save_steps 0\n --overwrite_cache\n --eval_after_first_stage\n '.split()
        self.run_and_check(train_args)
        eval_args = '\n --model_type roberta\n --model_name_or_path ./examples/deebert/saved_models/roberta-base/MRPC/two_stage\n --task_name MRPC\n --do_eval\n --do_lower_case\n --data_dir ./tests/fixtures/tests_samples/MRPC/\n --output_dir ./examples/deebert/saved_models/roberta-base/MRPC/two_stage\n --plot_data_dir ./examples/deebert/results/\n --max_seq_length 128\n --eval_each_highway\n --eval_highway\n --overwrite_cache\n --per_gpu_eval_batch_size=1\n '.split()
        self.run_and_check(eval_args)
        entropy_eval_args = '\n --model_type roberta\n --model_name_or_path ./examples/deebert/saved_models/roberta-base/MRPC/two_stage\n --task_name MRPC\n --do_eval\n --do_lower_case\n --data_dir ./tests/fixtures/tests_samples/MRPC/\n --output_dir ./examples/deebert/saved_models/roberta-base/MRPC/two_stage\n --plot_data_dir ./examples/deebert/results/\n --max_seq_length 128\n --early_exit_entropy 0.1\n --eval_highway\n --overwrite_cache\n --per_gpu_eval_batch_size=1\n '.split()
        self.run_and_check(entropy_eval_args)
| 84 | 1 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
"""configuration_albert""": ["""ALBERT_PRETRAINED_CONFIG_ARCHIVE_MAP""", """AlbertConfig""", """AlbertOnnxConfig"""],
}
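# Each optional backend (sentencepiece, tokenizers, torch, tf, flax) is probed below;
# when a backend is available, its symbols are added to the lazy import structure.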
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_albert"] = ["""AlbertTokenizer"""]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_albert_fast"] = ["""AlbertTokenizerFast"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_albert"] = [
"""ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""AlbertForMaskedLM""",
"""AlbertForMultipleChoice""",
"""AlbertForPreTraining""",
"""AlbertForQuestionAnswering""",
"""AlbertForSequenceClassification""",
"""AlbertForTokenClassification""",
"""AlbertModel""",
"""AlbertPreTrainedModel""",
"""load_tf_weights_in_albert""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_albert"] = [
"""TF_ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFAlbertForMaskedLM""",
"""TFAlbertForMultipleChoice""",
"""TFAlbertForPreTraining""",
"""TFAlbertForQuestionAnswering""",
"""TFAlbertForSequenceClassification""",
"""TFAlbertForTokenClassification""",
"""TFAlbertMainLayer""",
"""TFAlbertModel""",
"""TFAlbertPreTrainedModel""",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_albert"] = [
"""FlaxAlbertForMaskedLM""",
"""FlaxAlbertForMultipleChoice""",
"""FlaxAlbertForPreTraining""",
"""FlaxAlbertForQuestionAnswering""",
"""FlaxAlbertForSequenceClassification""",
"""FlaxAlbertForTokenClassification""",
"""FlaxAlbertModel""",
"""FlaxAlbertPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_albert import ALBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, AlbertConfig, AlbertOnnxConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_albert import AlbertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_albert_fast import AlbertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_albert import (
ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
AlbertForMaskedLM,
AlbertForMultipleChoice,
AlbertForPreTraining,
AlbertForQuestionAnswering,
AlbertForSequenceClassification,
AlbertForTokenClassification,
AlbertModel,
AlbertPreTrainedModel,
load_tf_weights_in_albert,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_albert import (
TF_ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFAlbertForMaskedLM,
TFAlbertForMultipleChoice,
TFAlbertForPreTraining,
TFAlbertForQuestionAnswering,
TFAlbertForSequenceClassification,
TFAlbertForTokenClassification,
TFAlbertMainLayer,
TFAlbertModel,
TFAlbertPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_albert import (
FlaxAlbertForMaskedLM,
FlaxAlbertForMultipleChoice,
FlaxAlbertForPreTraining,
FlaxAlbertForQuestionAnswering,
FlaxAlbertForSequenceClassification,
FlaxAlbertForTokenClassification,
FlaxAlbertModel,
FlaxAlbertPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 92 |
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import XLMRobertaTokenizerFast
from diffusers import DDIMScheduler, KandinskyImg2ImgPipeline, KandinskyPriorPipeline, UNet2DConditionModel, VQModel
from diffusers.pipelines.kandinsky.text_encoder import MCLIPConfig, MultilingualCLIP
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
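# Full determinism keeps the pixel-slice comparisons in the fast test reproducible.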
class KandinskyImg2ImgPipelineFastTests( PipelineTesterMixin , unittest.TestCase ):
    pipeline_class = KandinskyImg2ImgPipeline
    params = ["""prompt""", """image_embeds""", """negative_image_embeds""", """image"""]
    batch_params = [
"""prompt""",
"""negative_prompt""",
"""image_embeds""",
"""negative_image_embeds""",
"""image""",
]
    required_optional_params = [
"""generator""",
"""height""",
"""width""",
"""strength""",
"""guidance_scale""",
"""negative_prompt""",
"""num_inference_steps""",
"""return_dict""",
"""guidance_scale""",
"""num_images_per_prompt""",
"""output_type""",
"""return_dict""",
]
    test_xformers_attention = False
@property
    def text_embedder_hidden_size( self ):
"""simple docstring"""
return 3_2
@property
    def time_input_dim( self ):
"""simple docstring"""
return 3_2
@property
    def block_out_channels_a( self ):
"""simple docstring"""
return self.time_input_dim
@property
    def time_embed_dim( self ):
"""simple docstring"""
return self.time_input_dim * 4
@property
    def cross_attention_dim( self ):
"""simple docstring"""
return 1_0_0
@property
    def dummy_tokenizer( self ):
        """simple docstring"""
        tokenizer = XLMRobertaTokenizerFast.from_pretrained("YiYiXu/tiny-random-mclip-base" )
return tokenizer
@property
    def dummy_text_encoder( self ):
"""simple docstring"""
torch.manual_seed(0 )
        config = MCLIPConfig(
            numDims=self.cross_attention_dim , transformerDimensions=self.text_embedder_hidden_size , hidden_size=self.text_embedder_hidden_size , intermediate_size=37 , num_attention_heads=4 , num_hidden_layers=5 , vocab_size=1005 , )
        text_encoder = MultilingualCLIP(config )
        text_encoder = text_encoder.eval()
return text_encoder
@property
    def dummy_unet( self ):
"""simple docstring"""
torch.manual_seed(0 )
        model_kwargs = {
"in_channels": 4,
# Out channels is double in channels because predicts mean and variance
"out_channels": 8,
"addition_embed_type": "text_image",
"down_block_types": ("ResnetDownsampleBlock2D", "SimpleCrossAttnDownBlock2D"),
"up_block_types": ("SimpleCrossAttnUpBlock2D", "ResnetUpsampleBlock2D"),
"mid_block_type": "UNetMidBlock2DSimpleCrossAttn",
"block_out_channels": (self.block_out_channels_a, self.block_out_channels_a * 2),
"layers_per_block": 1,
"encoder_hid_dim": self.text_embedder_hidden_size,
"encoder_hid_dim_type": "text_image_proj",
"cross_attention_dim": self.cross_attention_dim,
"attention_head_dim": 4,
"resnet_time_scale_shift": "scale_shift",
"class_embed_type": None,
}
        model = UNet2DConditionModel(**model_kwargs )
        return model
@property
    def dummy_movq_kwargs( self ):
"""simple docstring"""
return {
"block_out_channels": [3_2, 6_4],
"down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"],
"in_channels": 3,
"latent_channels": 4,
"layers_per_block": 1,
"norm_num_groups": 8,
"norm_type": "spatial",
"num_vq_embeddings": 1_2,
"out_channels": 3,
"up_block_types": [
"AttnUpDecoderBlock2D",
"UpDecoderBlock2D",
],
"vq_embed_dim": 4,
}
@property
    def dummy_movq( self ):
"""simple docstring"""
torch.manual_seed(0 )
        model = VQModel(**self.dummy_movq_kwargs )
return model
    def get_dummy_components( self ):
"""simple docstring"""
        text_encoder = self.dummy_text_encoder
        tokenizer = self.dummy_tokenizer
        unet = self.dummy_unet
        movq = self.dummy_movq
        scheduler_kwargs = {
"num_train_timesteps": 1_0_0_0,
"beta_schedule": "linear",
"beta_start": 0.0_00_85,
"beta_end": 0.0_12,
"clip_sample": False,
"set_alpha_to_one": False,
"steps_offset": 0,
"prediction_type": "epsilon",
"thresholding": False,
}
        scheduler = DDIMScheduler(**scheduler_kwargs )
        components = {
"text_encoder": text_encoder,
"tokenizer": tokenizer,
"unet": unet,
"scheduler": scheduler,
"movq": movq,
}
return components
    def get_dummy_inputs( self , device , seed=0 ):
        """simple docstring"""
        image_embeds = floats_tensor((1, self.cross_attention_dim) , rng=random.Random(seed ) ).to(device )
        negative_image_embeds = floats_tensor((1, self.cross_attention_dim) , rng=random.Random(seed + 1 ) ).to(device )
        # create init_image
        image = floats_tensor((1, 3, 64, 64) , rng=random.Random(seed ) ).to(device )
        image = image.cpu().permute(0 , 2 , 3 , 1 )[0]
        init_image = Image.fromarray(np.uint8(image ) ).convert("RGB" ).resize((256, 256) )
        if str(device ).startswith("mps" ):
            generator = torch.manual_seed(seed )
        else:
            generator = torch.Generator(device=device ).manual_seed(seed )
        inputs = {
"prompt": "horse",
"image": init_image,
"image_embeds": image_embeds,
"negative_image_embeds": negative_image_embeds,
"generator": generator,
"height": 6_4,
"width": 6_4,
"num_inference_steps": 1_0,
"guidance_scale": 7.0,
"strength": 0.2,
"output_type": "np",
}
return inputs
    def test_kandinsky_img2img( self ):
        """simple docstring"""
        device = "cpu"
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components )
        pipe = pipe.to(device )
        pipe.set_progress_bar_config(disable=None )
        output = pipe(**self.get_dummy_inputs(device ) )
        image = output.images
        image_from_tuple = pipe(
            **self.get_dummy_inputs(device ) , return_dict=False , )[0]
        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 6_4, 6_4, 3)
        expected_slice = np.array(
[0.61_47_49_43, 0.6_07_35_39, 0.43_30_85_44, 0.5_92_82_69, 0.47_49_35_95, 0.46_75_59_73, 0.4_61_38_38, 0.45_36_87_97, 0.50_11_92_33] )
assert (
np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
), f""" expected_slice {expected_slice}, but got {image_slice.flatten()}"""
assert (
np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
), f""" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}"""
@slow
@require_torch_gpu
class KandinskyImg2ImgPipelineIntegrationTests( unittest.TestCase ):
    def tearDown( self ):
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
    def test_kandinsky_img2img( self ):
"""simple docstring"""
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/kandinsky/kandinsky_img2img_frog.npy" )
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/kandinsky/cat.png" )
        prompt = "A red cartoon frog, 4k"
        pipe_prior = KandinskyPriorPipeline.from_pretrained(
            "kandinsky-community/kandinsky-2-1-prior" , torch_dtype=torch.float16 )
        pipe_prior.to(torch_device )
        pipeline = KandinskyImg2ImgPipeline.from_pretrained(
            "kandinsky-community/kandinsky-2-1" , torch_dtype=torch.float16 )
        pipeline = pipeline.to(torch_device )
        pipeline.set_progress_bar_config(disable=None )
        generator = torch.Generator(device="cpu" ).manual_seed(0 )
        image_emb , zero_image_emb = pipe_prior(
            prompt , generator=generator , num_inference_steps=5 , negative_prompt="" , ).to_tuple()
        output = pipeline(
            prompt , image=init_image , image_embeds=image_emb , negative_image_embeds=zero_image_emb , generator=generator , num_inference_steps=100 , height=768 , width=768 , strength=0.2 , output_type="np" , )
        image = output.images[0]
        assert image.shape == (768, 768, 3)
        assert_mean_pixel_difference(image , expected_image )
| 92 | 1 |
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re
from ..models.auto import AutoProcessor
from ..models.vision_encoder_decoder import VisionEncoderDecoderModel
from ..utils import is_vision_available
from .base import PipelineTool
if is_vision_available():
from PIL import Image
class UpperCamelCase ( _UpperCAmelCase ):
lowerCAmelCase : Dict = """naver-clova-ix/donut-base-finetuned-docvqa"""
lowerCAmelCase : Optional[Any] = (
"""This is a tool that answers a question about an document (pdf). It takes an input named `document` which """
"""should be the document containing the information, as well as a `question` that is the question about the """
"""document. It returns a text that contains the answer to the question."""
)
lowerCAmelCase : Optional[int] = """document_qa"""
lowerCAmelCase : Tuple = AutoProcessor
lowerCAmelCase : Optional[Any] = VisionEncoderDecoderModel
lowerCAmelCase : Dict = ["""image""", """text"""]
lowerCAmelCase : Tuple = ["""text"""]
def __init__( self , *UpperCAmelCase__ , **UpperCAmelCase__ ):
if not is_vision_available():
raise ValueError("Pillow must be installed to use the DocumentQuestionAnsweringTool." )
super().__init__(*UpperCAmelCase__ , **UpperCAmelCase__ )
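    # encode() builds the Donut task prompt and pixel inputs, forward() generates the
    # answer tokens, and decode() strips special tokens and parses out the answer field.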
    def encode( self , document , question ):
        task_prompt = "<s_docvqa><s_question>{user_input}</s_question><s_answer>"
        prompt = task_prompt.replace("{user_input}" , question )
        decoder_input_ids = self.pre_processor.tokenizer(
            prompt , add_special_tokens=False , return_tensors="pt" ).input_ids
        pixel_values = self.pre_processor(document , return_tensors="pt" ).pixel_values
        return {"decoder_input_ids": decoder_input_ids, "pixel_values": pixel_values}
    def forward( self , inputs ):
        return self.model.generate(
            inputs["pixel_values"].to(self.device ) , decoder_input_ids=inputs["decoder_input_ids"].to(self.device ) , max_length=self.model.decoder.config.max_position_embeddings , early_stopping=True , pad_token_id=self.pre_processor.tokenizer.pad_token_id , eos_token_id=self.pre_processor.tokenizer.eos_token_id , use_cache=True , num_beams=1 , bad_words_ids=[[self.pre_processor.tokenizer.unk_token_id]] , return_dict_in_generate=True , ).sequences
    def decode( self , outputs ):
        sequence = self.pre_processor.batch_decode(outputs )[0]
        sequence = sequence.replace(self.pre_processor.tokenizer.eos_token , "" )
        sequence = sequence.replace(self.pre_processor.tokenizer.pad_token , "" )
        sequence = re.sub(R"<.*?>" , "" , sequence , count=1 ).strip()  # remove first task start token
        sequence = self.pre_processor.token2json(sequence )
        return sequence["answer"]
| 198 |
import argparse
import json
import os
import torch
from torch import nn
from transformers import NllbMoeConfig, NllbMoeModel
from transformers.modeling_utils import dtype_byte_size
from transformers.utils import WEIGHTS_INDEX_NAME, WEIGHTS_NAME
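# Reshards fairseq NLLB-MoE checkpoints (one file per expert rank plus a shared file)
# into HuggingFace-style weight shards with an accompanying weight index.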
def remove_ignore_keys_(state_dict )-> Dict:
    """simple docstring"""
    ignore_keys = [
        "encoder.version",
        "decoder.version",
        "model.encoder.version",
        "model.decoder.version",
        "decoder.output_projection.weight",
        "_float_tensor",
        "encoder.embed_positions._float_tensor",
        "decoder.embed_positions._float_tensor",
    ]
    for k in ignore_keys:
        state_dict.pop(k , None )
def make_linear_from_emb(emb )-> Optional[Any]:
    """simple docstring"""
    vocab_size , emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size , emb_size , bias=False )
    lin_layer.weight.data = emb.weight.data
    return lin_layer
def rename_fairseq_keys(state_dict , expert_idx=None )-> str:
    """simple docstring"""
    new_dict = {}
    for old_key in state_dict.keys():
        key = old_key
        if "moe_layer.experts." in key:
            if expert_idx is not None:
                key = key.replace("moe_layer.experts.0" , f"""ffn.experts.expert_{expert_idx}""" )
            else:
                key = key.replace("moe_layer.experts." , "ffn.experts.expert_" )
        if "gate" in key:
            key = key.replace(".moe_layer.gate.wg" , ".ffn.router.classifier" )
        if "fc2" in key and "experts" not in key:
            key = key.replace(".fc2." , ".ffn.fc2." )
        if "fc1" in key and "experts" not in key:
            key = key.replace(".fc1." , ".ffn.fc1." )
        if ".encoder_attn." in key:
            key = key.replace(".encoder_attn." , ".cross_attention." )
        if "encoder_attn_layer_norm" in key:
            key = key.replace("encoder_attn_layer_norm" , "cross_attention_layer_norm" )
        if "final_layer_norm" in key:
            key = key.replace("final_layer_norm" , "ff_layer_norm" )
        new_dict[key] = state_dict[old_key]
    return new_dict
def shard_on_the_fly(switch_checkpoint_path , dump_path , num_experts , dtype , weights_name : str = WEIGHTS_NAME )-> List[str]:
    """simple docstring"""
    sharded_state_dicts = []
    total_size = 0
    os.makedirs(dump_path , exist_ok=True )
    for expert in range(num_experts ):
        expert_path = switch_checkpoint_path + f"""-rank-{expert}.pt"""
        if os.path.isfile(expert_path ):
            expert_state = torch.load(expert_path )["model"]
            remove_ignore_keys_(expert_state )
            expert_state = rename_fairseq_keys(expert_state , expert )
            save_path = os.path.join(
                dump_path , weights_name.replace(".bin" , f"""-{len(sharded_state_dicts )+1:05d}-of-???.bin""" ) )
            torch.save(expert_state , save_path )
            sharded_state_dicts.append(expert_state.keys() )
            total_size += sum([value.numel() for key, value in expert_state.items()] ) * dtype_byte_size(
                expert_state[list(expert_state )[0]].dtype )
    # Add the last block
    save_path = os.path.join(dump_path , weights_name.replace(".bin" , f"""-{len(sharded_state_dicts )+1:05d}-of-???.bin""" ) )
    shared_weights = torch.load(switch_checkpoint_path + "-shared.pt" )["model"]
    remove_ignore_keys_(shared_weights )
    shared_weights = rename_fairseq_keys(shared_weights , None )
    shared_weights["shared.weight"] = shared_weights["decoder.embed_tokens.weight"]
    sharded_state_dicts.append(shared_weights.keys() )
    # If we only have the shared weights (dummy model/experts saved on the same file)
    if len(sharded_state_dicts ) == 1:
        save_path = os.path.join(dump_path , weights_name )
        torch.save(shared_weights , save_path )
        return {weights_name: sharded_state_dicts[0]}, None
    else:
        torch.save(shared_weights , save_path )
    # Otherwise, let's build the index
    weight_map = {}
    for idx, shard in enumerate(sharded_state_dicts ):
        shard_file = weights_name.replace(".bin" , f"""-{idx+1:05d}-of-{len(sharded_state_dicts ):05d}.bin""" )
        temp_filename = os.path.join(dump_path , weights_name.replace(".bin" , f"""-{idx+1:05d}-of-???.bin""" ) )
        os.rename(temp_filename , os.path.join(dump_path , shard_file ) )
        for key in shard:
            weight_map[key] = shard_file
    # Add the metadata
    metadata = {"total_size": total_size}
    index = {"metadata": metadata, "weight_map": weight_map}
    with open(os.path.join(dump_path , WEIGHTS_INDEX_NAME ) , "w" , encoding="utf-8" ) as f:
        content = json.dumps(index , indent=2 , sort_keys=True ) + "\n"
        f.write(content )
    return metadata, index
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--nllb_moe_checkpoint_path",
default="/home/arthur_huggingface_co/fairseq/weights/checkpoints/model_moe_54b/checkpoint_2_300000",
type=str,
required=False,
help="Path to a directory containing a folder per layer. Follows the original Google format.",
)
parser.add_argument("--dtype", default="float32", type=str, required=False, help="dtype of the saved model")
parser.add_argument(
"--pytorch_dump_folder_path",
default="/home/arthur_huggingface_co/fairseq/weights/checkpoints/hf-converted-moe-54b",
type=str,
required=False,
help="Path to the output pytorch model.",
)
    args = parser.parse_args()
    metadata, index = shard_on_the_fly(
args.nllb_moe_checkpoint_path,
args.pytorch_dump_folder_path,
128,
args.dtype,
)
    config = NllbMoeConfig.from_pretrained(
"facebook/nllb-200-3.3B", encoder_sparse_step=4, decoder_sparse_step=4, num_experts=128
)
config.save_pretrained(args.pytorch_dump_folder_path)
    model = NllbMoeModel.from_pretrained(args.pytorch_dump_folder_path)
print("Done")
model.save_pretrained(args.pytorch_dump_folder_path)
| 198 | 1 |
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
if TYPE_CHECKING:
from ... import FeatureExtractionMixin, TensorType
logger = logging.get_logger(__name__)
IMAGEGPT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'''openai/imagegpt-small''': '''''',
'''openai/imagegpt-medium''': '''''',
'''openai/imagegpt-large''': '''''',
}
class ImageGPTConfig(PretrainedConfig ):
'''simple docstring'''
__lowerCamelCase : str = '''imagegpt'''
__lowerCamelCase : Union[str, Any] = ['''past_key_values''']
__lowerCamelCase : Optional[int] = {
'''hidden_size''': '''n_embd''',
'''max_position_embeddings''': '''n_positions''',
'''num_attention_heads''': '''n_head''',
'''num_hidden_layers''': '''n_layer''',
}
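    # attribute_map exposes the generic config names (hidden_size, ...) as aliases for
    # the GPT-2-style attribute names used by ImageGPT.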
    def __init__( self : Any , vocab_size=512 + 1 , n_positions=32 * 32 , n_embd=512 , n_layer=24 , n_head=8 , n_inner=None , activation_function="quick_gelu" , resid_pdrop=0.1 , embd_pdrop=0.1 , attn_pdrop=0.1 , layer_norm_epsilon=1e-5 , initializer_range=0.02 , scale_attn_weights=True , use_cache=True , scale_attn_by_inverse_layer_idx=False , reorder_and_upcast_attn=False , tie_word_embeddings=False , **kwargs , ) -> Tuple:
        """simple docstring"""
        self.vocab_size = vocab_size
        self.n_positions = n_positions
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        self.n_inner = n_inner
        self.activation_function = activation_function
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.scale_attn_weights = scale_attn_weights
        self.use_cache = use_cache
        self.scale_attn_by_inverse_layer_idx = scale_attn_by_inverse_layer_idx
        self.reorder_and_upcast_attn = reorder_and_upcast_attn
        self.tie_word_embeddings = tie_word_embeddings
        super().__init__(tie_word_embeddings=tie_word_embeddings , **kwargs )
class ImageGPTOnnxConfig(OnnxConfig ):
'''simple docstring'''
@property
    def inputs( self : Dict ) -> Mapping[str, Mapping[int, str]]:
"""simple docstring"""
return OrderedDict(
[
("""input_ids""", {0: """batch""", 1: """sequence"""}),
] )
def a_ ( self : Optional[Any] , __lowerCAmelCase : "FeatureExtractionMixin" , __lowerCAmelCase : int = 1 , __lowerCAmelCase : int = -1 , __lowerCAmelCase : bool = False , __lowerCAmelCase : Optional["TensorType"] = None , __lowerCAmelCase : int = 3 , __lowerCAmelCase : int = 32 , __lowerCAmelCase : int = 32 , ) -> Mapping[str, Any]:
"""simple docstring"""
A__ = self._generate_dummy_images(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
A__ = dict(preprocessor(images=__lowerCAmelCase , return_tensors=__lowerCAmelCase ) )
return inputs
| 274 |
from datetime import datetime
import requests
from bs4 import BeautifulSoup
if __name__ == "__main__":
    url = input('''Enter image url: ''').strip()
    print(F'''Downloading image from {url} ...''')
    soup = BeautifulSoup(requests.get(url).content, '''html.parser''')
    # The image URL is in the content field of the first meta tag with property og:image
    image_url = soup.find('''meta''', {'''property''': '''og:image'''})['''content''']
    image_data = requests.get(image_url).content
    file_name = F'''{datetime.now():%Y-%m-%d_%H:%M:%S}.jpg'''
    with open(file_name, '''wb''') as fp:
        fp.write(image_data)
    print(F'''Done. Image saved to disk as {file_name}.''')
| 274 | 1 |
"""simple docstring"""
import os
import time
from dataclasses import dataclass, field
from enum import Enum
from typing import Dict, List, Optional, Union
import torch
from filelock import FileLock
from torch.utils.data import Dataset
from ...models.auto.modeling_auto import MODEL_FOR_QUESTION_ANSWERING_MAPPING
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
from ..processors.squad import SquadFeatures, SquadV1Processor, SquadV2Processor, squad_convert_examples_to_features
logger = logging.get_logger(__name__)
MODEL_CONFIG_CLASSES = list(MODEL_FOR_QUESTION_ANSWERING_MAPPING.keys())
MODEL_TYPES = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)
@dataclass
class SquadDataTrainingArguments:
    model_type: str = field(
        default=None , metadata={"""help""": """Model type selected in the list: """ + """, """.join(MODEL_TYPES )} )
    data_dir: str = field(
        default=None , metadata={"""help""": """The input data dir. Should contain the .json files for the SQuAD task."""} )
    max_seq_length: int = field(
        default=128 , metadata={
            """help""": (
                """The maximum total input sequence length after tokenization. Sequences longer """
                """than this will be truncated, sequences shorter will be padded."""
            )
        } , )
    doc_stride: int = field(
        default=128 , metadata={"""help""": """When splitting up a long document into chunks, how much stride to take between chunks."""} , )
    max_query_length: int = field(
        default=64 , metadata={
            """help""": (
                """The maximum number of tokens for the question. Questions longer than this will """
                """be truncated to this length."""
            )
        } , )
    max_answer_length: int = field(
        default=30 , metadata={
            """help""": (
                """The maximum length of an answer that can be generated. This is needed because the start """
                """and end predictions are not conditioned on one another."""
            )
        } , )
    overwrite_cache: bool = field(
        default=False , metadata={"""help""": """Overwrite the cached training and evaluation sets"""} )
    version_2_with_negative: bool = field(
        default=False , metadata={"""help""": """If true, the SQuAD examples contain some that do not have an answer."""} )
    null_score_diff_threshold: float = field(
        default=0.0 , metadata={"""help""": """If null_score - best_non_null is greater than the threshold predict null."""} )
    n_best_size: int = field(
        default=20 , metadata={"""help""": """If null_score - best_non_null is greater than the threshold predict null."""} )
    lang_id: int = field(
        default=0 , metadata={
            """help""": (
                """language id of input for language-specific xlm models (see"""
                """ tokenization_xlm.PRETRAINED_INIT_CONFIGURATION)"""
            )
        } , )
    threads: int = field(default=1 , metadata={"""help""": """multiple threads for converting example to features"""} )
class Split( Enum ):
    train = """train"""
    dev = """dev"""
class SquadDataset( Dataset ):
    args: SquadDataTrainingArguments
    features: List[SquadFeatures]
    mode: Split
    is_language_sensitive: bool
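    # Features are built once per (split, tokenizer, max_seq_length) combination and
    # cached to disk; a file lock keeps concurrent workers from rebuilding the cache.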
    def __init__( self , args , tokenizer , limit_length=None , mode=Split.train , is_language_sensitive=False , cache_dir=None , dataset_format="pt" , ):
        """simple docstring"""
        self.args = args
        self.is_language_sensitive = is_language_sensitive
        self.processor = SquadV2Processor() if args.version_2_with_negative else SquadV1Processor()
        if isinstance(mode , str ):
            try:
                mode = Split[mode]
            except KeyError:
                raise KeyError("mode is not a valid split name" )
        self.mode = mode
        # Load data features from cache or dataset file
        version_tag = "v2" if args.version_2_with_negative else "v1"
        cached_features_file = os.path.join(
            cache_dir if cache_dir is not None else args.data_dir , f"""cached_{mode.value}_{tokenizer.__class__.__name__}_{args.max_seq_length}_{version_tag}""" , )
# Make sure only the first process in distributed training processes the dataset,
# and the others will use the cache.
__lowerCAmelCase = cached_features_file + ".lock"
with FileLock(_A ):
if os.path.exists(_A ) and not args.overwrite_cache:
__lowerCAmelCase = time.time()
__lowerCAmelCase = torch.load(_A )
# Legacy cache files have only features, while new cache files
# will have dataset and examples also.
__lowerCAmelCase = self.old_features["features"]
__lowerCAmelCase = self.old_features.get("dataset" , _A )
__lowerCAmelCase = self.old_features.get("examples" , _A )
logger.info(
f"""Loading features from cached file {cached_features_file} [took %.3f s]""" , time.time() - start )
if self.dataset is None or self.examples is None:
logger.warning(
f"""Deleting cached file {cached_features_file} will allow dataset and examples to be cached in"""
" future run" )
else:
                if mode == Split.dev:
                    self.examples = self.processor.get_dev_examples(args.data_dir )
                else:
                    self.examples = self.processor.get_train_examples(args.data_dir )
                self.features , self.dataset = squad_convert_examples_to_features(
                    examples=self.examples , tokenizer=tokenizer , max_seq_length=args.max_seq_length , doc_stride=args.doc_stride , max_query_length=args.max_query_length , is_training=mode == Split.train , threads=args.threads , return_dataset=dataset_format , )
                start = time.time()
                torch.save(
                    {"features": self.features, "dataset": self.dataset, "examples": self.examples} , cached_features_file , )
# ^ This seems to take a lot of time so I want to investigate why and how we can improve.
logger.info(
f"""Saving features into cached file {cached_features_file} [took {time.time() - start:.3f} s]""" )
def __len__( self ):
"""simple docstring"""
return len(self.features )
    def __getitem__( self , i ):
        """simple docstring"""
        feature = self.features[i]
        input_ids = torch.tensor(feature.input_ids , dtype=torch.long )
        attention_mask = torch.tensor(feature.attention_mask , dtype=torch.long )
        token_type_ids = torch.tensor(feature.token_type_ids , dtype=torch.long )
        cls_index = torch.tensor(feature.cls_index , dtype=torch.long )
        p_mask = torch.tensor(feature.p_mask , dtype=torch.float )
        is_impossible = torch.tensor(feature.is_impossible , dtype=torch.float )
        inputs = {
            "input_ids": input_ids,
            "attention_mask": attention_mask,
            "token_type_ids": token_type_ids,
        }
        if self.args.model_type in ["xlm", "roberta", "distilbert", "camembert"]:
            del inputs["token_type_ids"]
        if self.args.model_type in ["xlnet", "xlm"]:
            inputs.update({"cls_index": cls_index, "p_mask": p_mask} )
            if self.args.version_2_with_negative:
                inputs.update({"is_impossible": is_impossible} )
            if self.is_language_sensitive:
                inputs.update({"langs": (torch.ones(input_ids.shape , dtype=torch.int64 ) * self.args.lang_id)} )
        if self.mode == Split.train:
            start_positions = torch.tensor(feature.start_position , dtype=torch.long )
            end_positions = torch.tensor(feature.end_position , dtype=torch.long )
            inputs.update({"start_positions": start_positions, "end_positions": end_positions} )
return inputs
| 353 |
import coval # From: git+https://github.com/ns-moosavi/coval.git # noqa: F401
from coval.conll import reader, util
from coval.eval import evaluator
import datasets
logger = datasets.logging.get_logger(__name__)
UpperCamelCase__ = """\
@InProceedings{moosavi2019minimum,
author = { Nafise Sadat Moosavi, Leo Born, Massimo Poesio and Michael Strube},
title = {Using Automatically Extracted Minimum Spans to Disentangle Coreference Evaluation from Boundary Detection},
year = {2019},
booktitle = {Proceedings of the 57th Annual Meeting of
the Association for Computational Linguistics (Volume 1: Long Papers)},
publisher = {Association for Computational Linguistics},
address = {Florence, Italy},
}
@inproceedings{10.3115/1072399.1072405,
author = {Vilain, Marc and Burger, John and Aberdeen, John and Connolly, Dennis and Hirschman, Lynette},
title = {A Model-Theoretic Coreference Scoring Scheme},
year = {1995},
isbn = {1558604022},
publisher = {Association for Computational Linguistics},
address = {USA},
url = {https://doi.org/10.3115/1072399.1072405},
doi = {10.3115/1072399.1072405},
booktitle = {Proceedings of the 6th Conference on Message Understanding},
pages = {45–52},
numpages = {8},
location = {Columbia, Maryland},
series = {MUC6 ’95}
}
@INPROCEEDINGS{Bagga98algorithmsfor,
author = {Amit Bagga and Breck Baldwin},
title = {Algorithms for Scoring Coreference Chains},
booktitle = {In The First International Conference on Language Resources and Evaluation Workshop on Linguistics Coreference},
year = {1998},
pages = {563--566}
}
@INPROCEEDINGS{Luo05oncoreference,
author = {Xiaoqiang Luo},
title = {On coreference resolution performance metrics},
booktitle = {In Proc. of HLT/EMNLP},
year = {2005},
pages = {25--32},
publisher = {URL}
}
@inproceedings{moosavi-strube-2016-coreference,
title = \"Which Coreference Evaluation Metric Do You Trust? A Proposal for a Link-based Entity Aware Metric\",
author = \"Moosavi, Nafise Sadat and
Strube, Michael\",
booktitle = \"Proceedings of the 54th Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers)\",
month = aug,
year = \"2016\",
address = \"Berlin, Germany\",
publisher = \"Association for Computational Linguistics\",
url = \"https://www.aclweb.org/anthology/P16-1060\",
doi = \"10.18653/v1/P16-1060\",
pages = \"632--642\",
}
"""
UpperCamelCase__ = """\
CoVal is a coreference evaluation tool for the CoNLL and ARRAU datasets which
implements of the common evaluation metrics including MUC [Vilain et al, 1995],
B-cubed [Bagga and Baldwin, 1998], CEAFe [Luo et al., 2005],
LEA [Moosavi and Strube, 2016] and the averaged CoNLL score
(the average of the F1 values of MUC, B-cubed and CEAFe)
[Denis and Baldridge, 2009a; Pradhan et al., 2011].
This wrapper of CoVal currently only work with CoNLL line format:
The CoNLL format has one word per line with all the annotation for this word in column separated by spaces:
Column Type Description
1 Document ID This is a variation on the document filename
2 Part number Some files are divided into multiple parts numbered as 000, 001, 002, ... etc.
3 Word number
4 Word itself This is the token as segmented/tokenized in the Treebank. Initially the *_skel file contain the placeholder [WORD] which gets replaced by the actual token from the Treebank which is part of the OntoNotes release.
5 Part-of-Speech
6 Parse bit This is the bracketed structure broken before the first open parenthesis in the parse, and the word/part-of-speech leaf replaced with a *. The full parse can be created by substituting the asterix with the \"([pos] [word])\" string (or leaf) and concatenating the items in the rows of that column.
7 Predicate lemma The predicate lemma is mentioned for the rows for which we have semantic role information. All other rows are marked with a \"-\"
8 Predicate Frameset ID This is the PropBank frameset ID of the predicate in Column 7.
9 Word sense This is the word sense of the word in Column 3.
10 Speaker/Author This is the speaker or author name where available. Mostly in Broadcast Conversation and Web Log data.
11 Named Entities These columns identifies the spans representing various named entities.
12:N Predicate Arguments There is one column each of predicate argument structure information for the predicate mentioned in Column 7.
N Coreference Coreference chain information encoded in a parenthesis structure.
More informations on the format can be found here (section \"*_conll File Format\"): http://www.conll.cemantix.org/2012/data.html
Details on the evaluation on CoNLL can be found here: https://github.com/ns-moosavi/coval/blob/master/conll/README.md
CoVal code was written by @ns-moosavi.
Some parts are borrowed from https://github.com/clarkkev/deep-coref/blob/master/evaluation.py
The test suite is taken from https://github.com/conll/reference-coreference-scorers/
Mention evaluation and the test suite are added by @andreasvc.
Parsing CoNLL files is developed by Leo Born.
"""
UpperCamelCase__ = """
Calculates coreference evaluation metrics.
Args:
predictions: list of sentences. Each sentence is a list of word predictions to score in the CoNLL format.
Each prediction is a word with its annotations as a string made of columns joined with spaces.
Only columns 4, 5, 6 and the last column are used (word, POS, Pars and coreference annotation)
See the details on the format in the description of the metric.
references: list of sentences. Each sentence is a list of word reference to score in the CoNLL format.
Each reference is a word with its annotations as a string made of columns joined with spaces.
Only columns 4, 5, 6 and the last column are used (word, POS, Pars and coreference annotation)
See the details on the format in the description of the metric.
keep_singletons: After extracting all mentions of key or system files,
mentions whose corresponding coreference chain is of size one,
are considered as singletons. The default evaluation mode will include
singletons in evaluations if they are included in the key or the system files.
By setting 'keep_singletons=False', all singletons in the key and system files
will be excluded from the evaluation.
NP_only: Most of the recent coreference resolvers only resolve NP mentions and
leave out the resolution of VPs. By setting the 'NP_only' option, the scorer will only evaluate the resolution of NPs.
min_span: By setting 'min_span', the scorer reports the results based on automatically detected minimum spans.
Minimum spans are determined using the MINA algorithm.
Returns:
'mentions': mentions
'muc': MUC metric [Vilain et al, 1995]
'bcub': B-cubed [Bagga and Baldwin, 1998]
'ceafe': CEAFe [Luo et al., 2005]
'lea': LEA [Moosavi and Strube, 2016]
'conll_score': averaged CoNLL score (the average of the F1 values of MUC, B-cubed and CEAFe)
Examples:
>>> coval = datasets.load_metric('coval')
>>> words = ['bc/cctv/00/cctv_0005 0 0 Thank VBP (TOP(S(VP* thank 01 1 Xu_li * (V*) * -',
... 'bc/cctv/00/cctv_0005 0 1 you PRP (NP*) - - - Xu_li * (ARG1*) (ARG0*) (116)',
... 'bc/cctv/00/cctv_0005 0 2 everyone NN (NP*) - - - Xu_li * (ARGM-DIS*) * (116)',
... 'bc/cctv/00/cctv_0005 0 3 for IN (PP* - - - Xu_li * (ARG2* * -',
... 'bc/cctv/00/cctv_0005 0 4 watching VBG (S(VP*)))) watch 01 1 Xu_li * *) (V*) -',
... 'bc/cctv/00/cctv_0005 0 5 . . *)) - - - Xu_li * * * -']
>>> references = [words]
>>> predictions = [words]
>>> results = coval.compute(predictions=predictions, references=references)
>>> print(results) # doctest:+ELLIPSIS
{'mentions/recall': 1.0,[...] 'conll_score': 100.0}
"""
def get_coref_infos( key_lines , sys_lines , NP_only=False , remove_nested=False , keep_singletons=True , min_span=False , doc="dummy_doc" ):
    key_doc_lines = {doc: key_lines}
    sys_doc_lines = {doc: sys_lines}
    doc_coref_infos = {}
    key_nested_coref_num = 0
    sys_nested_coref_num = 0
    key_removed_nested_clusters = 0
    sys_removed_nested_clusters = 0
    key_singletons_num = 0
    sys_singletons_num = 0
    key_clusters , singletons_num = reader.get_doc_mentions(doc , key_doc_lines[doc] , keep_singletons )
    key_singletons_num += singletons_num
    if NP_only or min_span:
        key_clusters = reader.set_annotated_parse_trees(key_clusters , key_doc_lines[doc] , NP_only , min_span )
    sys_clusters , singletons_num = reader.get_doc_mentions(doc , sys_doc_lines[doc] , keep_singletons )
    sys_singletons_num += singletons_num
    if NP_only or min_span:
        sys_clusters = reader.set_annotated_parse_trees(sys_clusters , sys_doc_lines[doc] , NP_only , min_span )
    if remove_nested:
        nested_mentions , removed_clusters = reader.remove_nested_coref_mentions(key_clusters , keep_singletons )
        key_nested_coref_num += nested_mentions
        key_removed_nested_clusters += removed_clusters
        nested_mentions , removed_clusters = reader.remove_nested_coref_mentions(sys_clusters , keep_singletons )
        sys_nested_coref_num += nested_mentions
        sys_removed_nested_clusters += removed_clusters
    sys_mention_key_cluster = reader.get_mention_assignments(sys_clusters , key_clusters )
    key_mention_sys_cluster = reader.get_mention_assignments(key_clusters , sys_clusters )
    doc_coref_infos[doc] = (key_clusters, sys_clusters, key_mention_sys_cluster, sys_mention_key_cluster)
if remove_nested:
logger.info(
"Number of removed nested coreferring mentions in the key "
F"""annotation: {key_nested_coref_num}; and system annotation: {sys_nested_coref_num}""" )
logger.info(
"Number of resulting singleton clusters in the key "
F"""annotation: {key_removed_nested_clusters}; and system annotation: {sys_removed_nested_clusters}""" )
if not keep_singletons:
logger.info(
F"""{key_singletons_num:d} and {sys_singletons_num:d} singletons are removed from the key and system """
"files, respectively" )
return doc_coref_infos
def evaluate( key_lines , sys_lines , metrics , NP_only , remove_nested , keep_singletons , min_span ):
    doc_coref_infos = get_coref_infos(key_lines , sys_lines , NP_only , remove_nested , keep_singletons , min_span )
    output_scores = {}
    conll = 0
    conll_subparts_num = 0
    for name, metric in metrics:
        recall , precision , f1 = evaluator.evaluate_documents(doc_coref_infos , metric , beta=1 )
        if name in ["muc", "bcub", "ceafe"]:
            conll += f1
            conll_subparts_num += 1
        output_scores.update({F"""{name}/recall""": recall, F"""{name}/precision""": precision, F"""{name}/f1""": f1} )
        logger.info(
            name.ljust(10 ) , F"""Recall: {recall * 100:.2f}""" , F""" Precision: {precision * 100:.2f}""" , F""" F1: {f1 * 100:.2f}""" , )
    if conll_subparts_num == 3:
        conll = (conll / 3) * 100
        logger.info(F"""CoNLL score: {conll:.2f}""" )
        output_scores.update({"conll_score": conll} )
    return output_scores
def check_gold_parse_annotation( key_lines ):
    has_gold_parse = False
    for line in key_lines:
        if not line.startswith("#" ):
            if len(line.split() ) > 6:
                parse_col = line.split()[5]
                if not parse_col == "-":
                    has_gold_parse = True
                    break
            else:
                break
    return has_gold_parse
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class Coval( datasets.Metric ):
    def _info( self ):
"""simple docstring"""
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"predictions": datasets.Sequence(datasets.Value("string" ) ),
"references": datasets.Sequence(datasets.Value("string" ) ),
} ) , codebase_urls=["https://github.com/ns-moosavi/coval"] , reference_urls=[
"https://github.com/ns-moosavi/coval",
"https://www.aclweb.org/anthology/P16-1060",
"http://www.conll.cemantix.org/2012/data.html",
] , )
    def _compute( self , predictions , references , keep_singletons=True , NP_only=False , min_span=False , remove_nested=False ):
        """simple docstring"""
        metrics = [
            ("mentions", evaluator.mentions),
            ("muc", evaluator.muc),
            ("bcub", evaluator.b_cubed),
            ("ceafe", evaluator.ceafe),
            ("lea", evaluator.lea),
        ]
        if min_span:
            has_gold_parse = util.check_gold_parse_annotation(references )
            if not has_gold_parse:
                raise NotImplementedError("References should have gold parse annotation to use 'min_span'." )
            # util.parse_key_file(key_file)
            # key_file = key_file + ".parsed"
        score = evaluate(
            key_lines=references , sys_lines=predictions , metrics=metrics , NP_only=NP_only , remove_nested=remove_nested , keep_singletons=keep_singletons , min_span=min_span , )
        return score
| 102 | 0 |
from __future__ import annotations
import random
import unittest
from transformers import TransfoXLConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TF_TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST,
TFTransfoXLForSequenceClassification,
TFTransfoXLLMHeadModel,
TFTransfoXLModel,
)
class __lowerCAmelCase :
def __init__( self , _snake_case , ):
"""simple docstring"""
_lowerCAmelCase = parent
_lowerCAmelCase = 13
_lowerCAmelCase = 7
_lowerCAmelCase = 30
_lowerCAmelCase = self.seq_length + self.mem_len
_lowerCAmelCase = 15
_lowerCAmelCase = True
_lowerCAmelCase = True
_lowerCAmelCase = 99
_lowerCAmelCase = [10, 50, 80]
_lowerCAmelCase = 32
_lowerCAmelCase = 32
_lowerCAmelCase = 4
_lowerCAmelCase = 8
_lowerCAmelCase = 128
_lowerCAmelCase = 2
_lowerCAmelCase = 2
_lowerCAmelCase = None
_lowerCAmelCase = 1
_lowerCAmelCase = 0
_lowerCAmelCase = 3
_lowerCAmelCase = self.vocab_size - 1
_lowerCAmelCase = 0.01
def snake_case ( self ):
"""simple docstring"""
_lowerCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
_lowerCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
_lowerCAmelCase = None
if self.use_labels:
_lowerCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
_lowerCAmelCase = TransfoXLConfig(
vocab_size=self.vocab_size , mem_len=self.mem_len , clamp_len=self.clamp_len , cutoffs=self.cutoffs , d_model=self.hidden_size , d_embed=self.d_embed , n_head=self.num_attention_heads , d_head=self.d_head , d_inner=self.d_inner , div_val=self.div_val , n_layer=self.num_hidden_layers , eos_token_id=self.eos_token_id , pad_token_id=self.vocab_size - 1 , init_range=self.init_range , num_labels=self.num_labels , )
return (config, input_ids_a, input_ids_a, lm_labels)
def snake_case ( self ):
"""simple docstring"""
random.seed(self.seed )
tf.random.set_seed(self.seed )
def snake_case ( self , _snake_case , _snake_case , _snake_case , _snake_case ):
"""simple docstring"""
_lowerCAmelCase = TFTransfoXLModel(_snake_case )
_lowerCAmelCase , _lowerCAmelCase = model(_snake_case ).to_tuple()
_lowerCAmelCase = {"""input_ids""": input_ids_a, """mems""": mems_a}
_lowerCAmelCase , _lowerCAmelCase = model(_snake_case ).to_tuple()
self.parent.assertEqual(hidden_states_a.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(hidden_states_a.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertListEqual(
[mem.shape for mem in mems_a] , [(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers , )
self.parent.assertListEqual(
[mem.shape for mem in mems_a] , [(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers , )
def snake_case ( self , _snake_case , _snake_case , _snake_case , _snake_case ):
"""simple docstring"""
_lowerCAmelCase = TFTransfoXLLMHeadModel(_snake_case )
_lowerCAmelCase , _lowerCAmelCase = model(_snake_case ).to_tuple()
_lowerCAmelCase = {"""input_ids""": input_ids_a, """labels""": lm_labels}
_lowerCAmelCase , _lowerCAmelCase = model(_snake_case ).to_tuple()
_lowerCAmelCase , _lowerCAmelCase = model([input_ids_a, mems_a] ).to_tuple()
_lowerCAmelCase = {"""input_ids""": input_ids_a, """mems""": mems_a, """labels""": lm_labels}
_lowerCAmelCase , _lowerCAmelCase = model(_snake_case ).to_tuple()
self.parent.assertEqual(lm_logits_a.shape , (self.batch_size, self.seq_length, self.vocab_size) )
self.parent.assertListEqual(
[mem.shape for mem in mems_a] , [(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers , )
self.parent.assertEqual(lm_logits_a.shape , (self.batch_size, self.seq_length, self.vocab_size) )
self.parent.assertListEqual(
[mem.shape for mem in mems_a] , [(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers , )
def snake_case ( self , _snake_case , _snake_case , _snake_case , _snake_case ):
"""simple docstring"""
_lowerCAmelCase = TFTransfoXLForSequenceClassification(_snake_case )
_lowerCAmelCase = model(_snake_case )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def snake_case ( self ):
"""simple docstring"""
_lowerCAmelCase = self.prepare_config_and_inputs()
((_lowerCAmelCase) , (_lowerCAmelCase) , (_lowerCAmelCase) , (_lowerCAmelCase)) = config_and_inputs
_lowerCAmelCase = {"""input_ids""": input_ids_a}
return config, inputs_dict
@require_tf
class __lowerCAmelCase ( lowerCamelCase__ , lowerCamelCase__ , unittest.TestCase ):
__lowerCamelCase = (
(TFTransfoXLModel, TFTransfoXLLMHeadModel, TFTransfoXLForSequenceClassification) if is_tf_available() else ()
)
__lowerCamelCase = () if is_tf_available() else ()
__lowerCamelCase = (
{
'''feature-extraction''': TFTransfoXLModel,
'''text-classification''': TFTransfoXLForSequenceClassification,
'''text-generation''': TFTransfoXLLMHeadModel,
'''zero-shot''': TFTransfoXLForSequenceClassification,
}
if is_tf_available()
else {}
)
# TODO: add this test when TFTransfoXLLMHead has a linear output layer implemented
__lowerCamelCase = False
__lowerCamelCase = False
__lowerCamelCase = False
__lowerCamelCase = False
def snake_case ( self , pipeline_test_casse_name , _snake_case , _snake_case , _snake_case , _snake_case ):
"""simple docstring"""
if pipeline_test_casse_name == "TextGenerationPipelineTests":
# Get `ValueError: AttributeError: 'NoneType' object has no attribute 'new_ones'` or `AssertionError`.
# `TransfoXLConfig` was never used in pipeline tests: cannot create a simple
# tokenizer.
return True
return False
def snake_case ( self ):
"""simple docstring"""
_lowerCAmelCase = TFTransfoXLModelTester(self )
_lowerCAmelCase = ConfigTester(self , config_class=_snake_case , d_embed=37 )
def snake_case ( self ):
"""simple docstring"""
self.config_tester.run_common_tests()
def snake_case ( self ):
"""simple docstring"""
self.model_tester.set_seed()
_lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_transfo_xl_model(*_snake_case )
def snake_case ( self ):
"""simple docstring"""
self.model_tester.set_seed()
_lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_transfo_xl_lm_head(*_snake_case )
def snake_case ( self ):
"""simple docstring"""
_lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_transfo_xl_for_sequence_classification(*_snake_case )
def snake_case ( self ):
"""simple docstring"""
_lowerCAmelCase , _lowerCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
_lowerCAmelCase = [TFTransfoXLForSequenceClassification]
for model_class in self.all_model_classes:
_lowerCAmelCase = model_class(_snake_case )
assert isinstance(model.get_input_embeddings() , tf.keras.layers.Layer )
if model_class in list_other_models_with_output_ebd:
_lowerCAmelCase = model.get_output_embeddings()
assert isinstance(_snake_case , tf.keras.layers.Layer )
_lowerCAmelCase = model.get_bias()
assert name is None
else:
_lowerCAmelCase = model.get_output_embeddings()
assert x is None
_lowerCAmelCase = model.get_bias()
assert name is None
def snake_case ( self ):
"""simple docstring"""
pass
@slow
def snake_case ( self ):
"""simple docstring"""
for model_name in TF_TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_lowerCAmelCase = TFTransfoXLModel.from_pretrained(_snake_case )
self.assertIsNotNone(_snake_case )
@unittest.skip(reason="""This model doesn't play well with fit() due to not returning a single loss.""" )
def snake_case ( self ):
"""simple docstring"""
pass
@require_tf
class __lowerCAmelCase ( unittest.TestCase ):
@unittest.skip("""Skip test until #12651 is resolved.""" )
@slow
def snake_case ( self ):
"""simple docstring"""
_lowerCAmelCase = TFTransfoXLLMHeadModel.from_pretrained("""transfo-xl-wt103""" )
# fmt: off
_lowerCAmelCase = tf.convert_to_tensor([[33,1297,2,1,1009,4,1109,11739,4762,358,5,25,245,22,1706,17,20098,5,3215,21,37,1110,3,13,1041,4,24,603,490,2,71477,20098,104447,2,20961,1,2604,4,1,329,3,6224,831,16002,2,8,603,78967,29546,23,803,20,25,416,5,8,232,4,277,6,1855,4601,3,29546,54,8,3609,5,57211,49,4,1,277,18,8,1755,15691,3,341,25,416,693,42573,71,17,401,94,31,17919,2,29546,7873,18,1,435,23,11011,755,5,5167,3,7983,98,84,2,29546,3267,8,3609,4,1,4865,1075,2,6087,71,6,346,8,5854,3,29546,824,1400,1868,2,19,160,2,311,8,5496,2,20920,17,25,15097,3,24,24,0]] , dtype=tf.intaa ) # noqa: E231
# fmt: on
# In 1991 , the remains of Russian Tsar Nicholas II and his family
# ( except for Alexei and Maria ) are discovered .
# The voice of Nicholas's young son , Tsarevich Alexei Nikolaevich , narrates the
# remainder of the story . 1883 Western Siberia ,
# a young Grigori Rasputin is asked by his father and a group of men to perform magic .
# Rasputin has a vision and denounces one of the men as a horse thief . Although his
# father initially slaps him for making such an accusation , Rasputin watches as the
# man is chased outside and beaten . Twenty years later , Rasputin sees a vision of
# the Virgin Mary , prompting him to become a priest . Rasputin quickly becomes famous ,
# with people , even a bishop , begging for his blessing . <eod> </s> <eos>
# fmt: off
_lowerCAmelCase = [33,1297,2,1,1009,4,1109,11739,4762,358,5,25,245,22,1706,17,20098,5,3215,21,37,1110,3,13,1041,4,24,603,490,2,71477,20098,104447,2,20961,1,2604,4,1,329,3,6224,831,16002,2,8,603,78967,29546,23,803,20,25,416,5,8,232,4,277,6,1855,4601,3,29546,54,8,3609,5,57211,49,4,1,277,18,8,1755,15691,3,341,25,416,693,42573,71,17,401,94,31,17919,2,29546,7873,18,1,435,23,11011,755,5,5167,3,7983,98,84,2,29546,3267,8,3609,4,1,4865,1075,2,6087,71,6,346,8,5854,3,29546,824,1400,1868,2,19,160,2,311,8,5496,2,20920,17,25,15097,3,24,24,0,33,1,1857,2,1,1009,4,1109,11739,4762,358,5,25,245,28,1110,3,13,1041,4,24,603,490,2,71477,20098,104447,2,20961,1,2604,4,1,329,3,0] # noqa: E231
# fmt: on
# In 1991, the remains of Russian Tsar Nicholas II and his family (
# except for Alexei and Maria ) are discovered. The voice of young son,
# Tsarevich Alexei Nikolaevich, narrates the remainder of the story.
# 1883 Western Siberia, a young Grigori Rasputin is asked by his father
# and a group of men to perform magic. Rasputin has a vision and
# denounces one of the men as a horse thief. Although his father initially
# slaps him for making such an accusation, Rasputin watches as the man
# is chased outside and beaten. Twenty years later, Rasputin sees a vision
# of the Virgin Mary, prompting him to become a priest.
# Rasputin quickly becomes famous, with people, even a bishop, begging for
# his blessing. <unk> <unk> <eos> In the 1990s, the remains of Russian Tsar
# Nicholas II and his family were discovered. The voice of <unk> young son,
# Tsarevich Alexei Nikolaevich, narrates the remainder of the story.<eos>
_lowerCAmelCase = model.generate(_snake_case , max_length=200 , do_sample=_snake_case )
self.assertListEqual(output_ids[0].numpy().tolist() , _snake_case )
| 82 |
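The model tester above fabricates token-id batches with an `ids_tensor` helper. A minimal TensorFlow stand-in, assuming only that random ids in `[0, vocab_size)` are needed:

```python
# A minimal stand-in for the `ids_tensor` helper used in the tests above:
# a random integer tensor of token ids with the requested shape.
import tensorflow as tf

def ids_tensor(shape, vocab_size):
    return tf.random.uniform(shape, minval=0, maxval=vocab_size, dtype=tf.int32)

batch = ids_tensor([13, 7], vocab_size=99)  # matches batch_size=13, seq_length=7
print(batch.shape)  # (13, 7)
```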
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_lowerCAmelCase :List[Any] = {'configuration_opt': ['OPT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'OPTConfig']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowerCAmelCase :Any = [
'OPT_PRETRAINED_MODEL_ARCHIVE_LIST',
'OPTForCausalLM',
'OPTModel',
'OPTPreTrainedModel',
'OPTForSequenceClassification',
'OPTForQuestionAnswering',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowerCAmelCase :Optional[int] = ['TFOPTForCausalLM', 'TFOPTModel', 'TFOPTPreTrainedModel']
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowerCAmelCase :Any = [
'FlaxOPTForCausalLM',
'FlaxOPTModel',
'FlaxOPTPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_opt import OPT_PRETRAINED_CONFIG_ARCHIVE_MAP, OPTConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_opt import (
OPT_PRETRAINED_MODEL_ARCHIVE_LIST,
OPTForCausalLM,
OPTForQuestionAnswering,
OPTForSequenceClassification,
OPTModel,
OPTPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_opt import TFOPTForCausalLM, TFOPTModel, TFOPTPreTrainedModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_opt import FlaxOPTForCausalLM, FlaxOPTModel, FlaxOPTPreTrainedModel
else:
import sys
_lowerCAmelCase :int = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 263 | 0 |
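The module above defers heavy framework imports through `_LazyModule`. A simplified sketch of the underlying lazy-import idea, offered as an illustration rather than the actual transformers implementation:

```python
# Simplified sketch of the lazy-import pattern behind `_LazyModule` above:
# attributes resolve to real imports only on first access. This is a
# stand-in for illustration, not the actual transformers implementation.
import importlib
import types

class LazyModule(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        # map attribute name -> module that defines it
        self._attr_to_module = {
            attr: mod for mod, attrs in import_structure.items() for attr in attrs
        }

    def __getattr__(self, attr):
        if attr not in self._attr_to_module:
            raise AttributeError(attr)
        module = importlib.import_module(self._attr_to_module[attr])
        value = getattr(module, attr)
        setattr(self, attr, value)  # cache so later lookups skip __getattr__
        return value

lazy = LazyModule("lazy_math", {"math": ["sqrt", "pi"]})
print(lazy.sqrt(2.0))  # `import math` happens only here
```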
import tempfile
import torch
from diffusers import (
DEISMultistepScheduler,
DPMSolverMultistepScheduler,
DPMSolverSinglestepScheduler,
UniPCMultistepScheduler,
)
from .test_schedulers import SchedulerCommonTest
class __a ( A__ ):
_lowerCAmelCase : int = (UniPCMultistepScheduler,)
_lowerCAmelCase : List[str] = (('''num_inference_steps''', 2_5),)
def __lowercase ( self : Optional[Any] , **SCREAMING_SNAKE_CASE : Any ):
'''simple docstring'''
UpperCamelCase__ : str = {
"num_train_timesteps": 10_00,
"beta_start": 0.0_0_0_1,
"beta_end": 0.0_2,
"beta_schedule": "linear",
"solver_order": 2,
"solver_type": "bh2",
}
config.update(**SCREAMING_SNAKE_CASE )
return config
def __lowercase ( self : Optional[Any] , SCREAMING_SNAKE_CASE : List[str]=0 , **SCREAMING_SNAKE_CASE : Dict ):
'''simple docstring'''
UpperCamelCase__ : Union[str, Any] = dict(self.forward_default_kwargs )
UpperCamelCase__ : List[Any] = kwargs.pop("num_inference_steps" , SCREAMING_SNAKE_CASE )
UpperCamelCase__ : Any = self.dummy_sample
UpperCamelCase__ : str = 0.1 * sample
UpperCamelCase__ : Any = [residual + 0.2, residual + 0.1_5, residual + 0.1_0]
for scheduler_class in self.scheduler_classes:
UpperCamelCase__ : Tuple = self.get_scheduler_config(**SCREAMING_SNAKE_CASE )
UpperCamelCase__ : Union[str, Any] = scheduler_class(**SCREAMING_SNAKE_CASE )
scheduler.set_timesteps(SCREAMING_SNAKE_CASE )
# copy over dummy past residuals
UpperCamelCase__ : List[str] = dummy_past_residuals[: scheduler.config.solver_order]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(SCREAMING_SNAKE_CASE )
UpperCamelCase__ : Optional[Any] = scheduler_class.from_pretrained(SCREAMING_SNAKE_CASE )
new_scheduler.set_timesteps(SCREAMING_SNAKE_CASE )
# copy over dummy past residuals
UpperCamelCase__ : str = dummy_past_residuals[: new_scheduler.config.solver_order]
UpperCamelCase__ , UpperCamelCase__ : Dict = sample, sample
for t in range(SCREAMING_SNAKE_CASE , time_step + scheduler.config.solver_order + 1 ):
UpperCamelCase__ : Optional[Any] = scheduler.step(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE ).prev_sample
UpperCamelCase__ : str = new_scheduler.step(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical"
def __lowercase ( self : Union[str, Any] , SCREAMING_SNAKE_CASE : List[Any]=0 , **SCREAMING_SNAKE_CASE : Any ):
'''simple docstring'''
UpperCamelCase__ : Optional[int] = dict(self.forward_default_kwargs )
UpperCamelCase__ : Tuple = kwargs.pop("num_inference_steps" , SCREAMING_SNAKE_CASE )
UpperCamelCase__ : Tuple = self.dummy_sample
UpperCamelCase__ : Optional[Any] = 0.1 * sample
UpperCamelCase__ : Dict = [residual + 0.2, residual + 0.1_5, residual + 0.1_0]
for scheduler_class in self.scheduler_classes:
UpperCamelCase__ : Any = self.get_scheduler_config()
UpperCamelCase__ : Tuple = scheduler_class(**SCREAMING_SNAKE_CASE )
scheduler.set_timesteps(SCREAMING_SNAKE_CASE )
# copy over dummy past residuals (must be after setting timesteps)
UpperCamelCase__ : Dict = dummy_past_residuals[: scheduler.config.solver_order]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(SCREAMING_SNAKE_CASE )
UpperCamelCase__ : Dict = scheduler_class.from_pretrained(SCREAMING_SNAKE_CASE )
# copy over dummy past residuals
new_scheduler.set_timesteps(SCREAMING_SNAKE_CASE )
# copy over dummy past residual (must be after setting timesteps)
UpperCamelCase__ : Optional[Any] = dummy_past_residuals[: new_scheduler.config.solver_order]
UpperCamelCase__ : Any = scheduler.step(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE ).prev_sample
UpperCamelCase__ : Optional[Any] = new_scheduler.step(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical"
def __lowercase ( self : Dict , SCREAMING_SNAKE_CASE : Union[str, Any]=None , **SCREAMING_SNAKE_CASE : Union[str, Any] ):
'''simple docstring'''
if scheduler is None:
UpperCamelCase__ : Optional[Any] = self.scheduler_classes[0]
UpperCamelCase__ : Union[str, Any] = self.get_scheduler_config(**SCREAMING_SNAKE_CASE )
UpperCamelCase__ : Dict = scheduler_class(**SCREAMING_SNAKE_CASE )
UpperCamelCase__ : Optional[int] = self.scheduler_classes[0]
UpperCamelCase__ : Optional[int] = self.get_scheduler_config(**SCREAMING_SNAKE_CASE )
UpperCamelCase__ : Dict = scheduler_class(**SCREAMING_SNAKE_CASE )
UpperCamelCase__ : str = 10
UpperCamelCase__ : Optional[Any] = self.dummy_model()
UpperCamelCase__ : Tuple = self.dummy_sample_deter
scheduler.set_timesteps(SCREAMING_SNAKE_CASE )
for i, t in enumerate(scheduler.timesteps ):
UpperCamelCase__ : List[str] = model(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
UpperCamelCase__ : int = scheduler.step(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ).prev_sample
return sample
def __lowercase ( self : List[str] ):
'''simple docstring'''
UpperCamelCase__ : Union[str, Any] = dict(self.forward_default_kwargs )
UpperCamelCase__ : Any = kwargs.pop("num_inference_steps" , SCREAMING_SNAKE_CASE )
for scheduler_class in self.scheduler_classes:
UpperCamelCase__ : str = self.get_scheduler_config()
UpperCamelCase__ : Dict = scheduler_class(**SCREAMING_SNAKE_CASE )
UpperCamelCase__ : List[Any] = self.dummy_sample
UpperCamelCase__ : int = 0.1 * sample
if num_inference_steps is not None and hasattr(SCREAMING_SNAKE_CASE , "set_timesteps" ):
scheduler.set_timesteps(SCREAMING_SNAKE_CASE )
elif num_inference_steps is not None and not hasattr(SCREAMING_SNAKE_CASE , "set_timesteps" ):
UpperCamelCase__ : Optional[Any] = num_inference_steps
# copy over dummy past residuals (must be done after set_timesteps)
UpperCamelCase__ : List[Any] = [residual + 0.2, residual + 0.1_5, residual + 0.1_0]
UpperCamelCase__ : Dict = dummy_past_residuals[: scheduler.config.solver_order]
UpperCamelCase__ : Tuple = scheduler.timesteps[5]
UpperCamelCase__ : str = scheduler.timesteps[6]
UpperCamelCase__ : str = scheduler.step(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE ).prev_sample
UpperCamelCase__ : Any = scheduler.step(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE ).prev_sample
self.assertEqual(output_a.shape , sample.shape )
self.assertEqual(output_a.shape , output_a.shape )
def __lowercase ( self : Optional[int] ):
'''simple docstring'''
UpperCamelCase__ : str = UniPCMultistepScheduler(**self.get_scheduler_config() )
UpperCamelCase__ : List[str] = self.full_loop(scheduler=SCREAMING_SNAKE_CASE )
UpperCamelCase__ : Union[str, Any] = torch.mean(torch.abs(SCREAMING_SNAKE_CASE ) )
assert abs(result_mean.item() - 0.2_4_6_4 ) < 1e-3
UpperCamelCase__ : Dict = DPMSolverSinglestepScheduler.from_config(scheduler.config )
UpperCamelCase__ : Optional[int] = DEISMultistepScheduler.from_config(scheduler.config )
UpperCamelCase__ : Any = DPMSolverMultistepScheduler.from_config(scheduler.config )
UpperCamelCase__ : Union[str, Any] = UniPCMultistepScheduler.from_config(scheduler.config )
UpperCamelCase__ : str = self.full_loop(scheduler=SCREAMING_SNAKE_CASE )
UpperCamelCase__ : Dict = torch.mean(torch.abs(SCREAMING_SNAKE_CASE ) )
assert abs(result_mean.item() - 0.2_4_6_4 ) < 1e-3
def __lowercase ( self : Any ):
'''simple docstring'''
for timesteps in [25, 50, 1_00, 9_99, 10_00]:
self.check_over_configs(num_train_timesteps=SCREAMING_SNAKE_CASE )
def __lowercase ( self : List[str] ):
'''simple docstring'''
self.check_over_configs(thresholding=SCREAMING_SNAKE_CASE )
for order in [1, 2, 3]:
for solver_type in ["bh1", "bh2"]:
for threshold in [0.5, 1.0, 2.0]:
for prediction_type in ["epsilon", "sample"]:
self.check_over_configs(
thresholding=SCREAMING_SNAKE_CASE , prediction_type=SCREAMING_SNAKE_CASE , sample_max_value=SCREAMING_SNAKE_CASE , solver_order=SCREAMING_SNAKE_CASE , solver_type=SCREAMING_SNAKE_CASE , )
def __lowercase ( self : Dict ):
'''simple docstring'''
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(prediction_type=SCREAMING_SNAKE_CASE )
def __lowercase ( self : Union[str, Any] ):
'''simple docstring'''
for solver_type in ["bh1", "bh2"]:
for order in [1, 2, 3]:
for prediction_type in ["epsilon", "sample"]:
self.check_over_configs(
solver_order=SCREAMING_SNAKE_CASE , solver_type=SCREAMING_SNAKE_CASE , prediction_type=SCREAMING_SNAKE_CASE , )
UpperCamelCase__ : Tuple = self.full_loop(
solver_order=SCREAMING_SNAKE_CASE , solver_type=SCREAMING_SNAKE_CASE , prediction_type=SCREAMING_SNAKE_CASE , )
assert not torch.isnan(SCREAMING_SNAKE_CASE ).any(), "Samples have nan numbers"
def __lowercase ( self : Optional[Any] ):
'''simple docstring'''
self.check_over_configs(lower_order_final=SCREAMING_SNAKE_CASE )
self.check_over_configs(lower_order_final=SCREAMING_SNAKE_CASE )
def __lowercase ( self : Tuple ):
'''simple docstring'''
for num_inference_steps in [1, 2, 3, 5, 10, 50, 1_00, 9_99, 10_00]:
self.check_over_forward(num_inference_steps=SCREAMING_SNAKE_CASE , time_step=0 )
def __lowercase ( self : Tuple ):
'''simple docstring'''
UpperCamelCase__ : int = self.full_loop()
UpperCamelCase__ : List[Any] = torch.mean(torch.abs(SCREAMING_SNAKE_CASE ) )
assert abs(result_mean.item() - 0.2_4_6_4 ) < 1e-3
def __lowercase ( self : Tuple ):
'''simple docstring'''
UpperCamelCase__ : Optional[int] = self.full_loop(prediction_type="v_prediction" )
UpperCamelCase__ : Optional[int] = torch.mean(torch.abs(SCREAMING_SNAKE_CASE ) )
assert abs(result_mean.item() - 0.1_0_1_4 ) < 1e-3
def __lowercase ( self : int ):
'''simple docstring'''
UpperCamelCase__ : str = self.scheduler_classes[0]
UpperCamelCase__ : Tuple = self.get_scheduler_config(thresholding=SCREAMING_SNAKE_CASE , dynamic_thresholding_ratio=0 )
UpperCamelCase__ : Dict = scheduler_class(**SCREAMING_SNAKE_CASE )
UpperCamelCase__ : Optional[Any] = 10
UpperCamelCase__ : List[Any] = self.dummy_model()
UpperCamelCase__ : Optional[Any] = self.dummy_sample_deter.half()
scheduler.set_timesteps(SCREAMING_SNAKE_CASE )
for i, t in enumerate(scheduler.timesteps ):
UpperCamelCase__ : Union[str, Any] = model(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
UpperCamelCase__ : str = scheduler.step(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ).prev_sample
assert sample.dtype == torch.floataa
def __lowercase ( self : List[str] , **SCREAMING_SNAKE_CASE : List[str] ):
'''simple docstring'''
for scheduler_class in self.scheduler_classes:
UpperCamelCase__ : Tuple = self.get_scheduler_config(**SCREAMING_SNAKE_CASE )
UpperCamelCase__ : Union[str, Any] = scheduler_class(**SCREAMING_SNAKE_CASE )
scheduler.set_timesteps(scheduler.config.num_train_timesteps )
assert len(scheduler.timesteps.unique() ) == scheduler.num_inference_steps
| 196 |
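The first test above verifies that a scheduler survives a config save/reload round trip. A minimal sketch of that check, assuming `diffusers` is installed:

```python
# Sketch of the config round-trip the first test above checks, assuming
# `diffusers` is installed: saving and reloading a scheduler config
# should reproduce the same timestep schedule.
import tempfile
import torch
from diffusers import UniPCMultistepScheduler

scheduler = UniPCMultistepScheduler(num_train_timesteps=1000, solver_order=2)
scheduler.set_timesteps(10)

with tempfile.TemporaryDirectory() as tmpdir:
    scheduler.save_config(tmpdir)
    reloaded = UniPCMultistepScheduler.from_pretrained(tmpdir)

reloaded.set_timesteps(10)
assert torch.equal(scheduler.timesteps, reloaded.timesteps)
```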
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCamelCase : Optional[int] =logging.get_logger(__name__)
lowerCamelCase : Optional[Any] ={
'''edbeeching/decision-transformer-gym-hopper-medium''': (
'''https://huggingface.co/edbeeching/decision-transformer-gym-hopper-medium/resolve/main/config.json'''
),
# See all DecisionTransformer models at https://huggingface.co/models?filter=decision_transformer
}
class __a ( A__ ):
_lowerCAmelCase : str = '''decision_transformer'''
_lowerCAmelCase : Optional[Any] = ['''past_key_values''']
_lowerCAmelCase : Optional[int] = {
'''max_position_embeddings''': '''n_positions''',
'''num_attention_heads''': '''n_head''',
'''num_hidden_layers''': '''n_layer''',
}
def __init__( self : List[Any] , SCREAMING_SNAKE_CASE : Optional[int]=17 , SCREAMING_SNAKE_CASE : Optional[int]=4 , SCREAMING_SNAKE_CASE : Optional[int]=1_28 , SCREAMING_SNAKE_CASE : Dict=40_96 , SCREAMING_SNAKE_CASE : Union[str, Any]=True , SCREAMING_SNAKE_CASE : Tuple=1 , SCREAMING_SNAKE_CASE : Optional[int]=10_24 , SCREAMING_SNAKE_CASE : Any=3 , SCREAMING_SNAKE_CASE : str=1 , SCREAMING_SNAKE_CASE : Optional[int]=None , SCREAMING_SNAKE_CASE : Dict="relu" , SCREAMING_SNAKE_CASE : Optional[int]=0.1 , SCREAMING_SNAKE_CASE : str=0.1 , SCREAMING_SNAKE_CASE : str=0.1 , SCREAMING_SNAKE_CASE : Union[str, Any]=1e-5 , SCREAMING_SNAKE_CASE : Optional[int]=0.0_2 , SCREAMING_SNAKE_CASE : Optional[Any]=True , SCREAMING_SNAKE_CASE : Tuple=True , SCREAMING_SNAKE_CASE : int=5_02_56 , SCREAMING_SNAKE_CASE : List[Any]=5_02_56 , SCREAMING_SNAKE_CASE : Any=False , SCREAMING_SNAKE_CASE : List[Any]=False , **SCREAMING_SNAKE_CASE : Optional[int] , ):
'''simple docstring'''
UpperCamelCase__ : List[str] = state_dim
UpperCamelCase__ : Optional[int] = act_dim
UpperCamelCase__ : Dict = hidden_size
UpperCamelCase__ : Any = max_ep_len
UpperCamelCase__ : Optional[Any] = action_tanh
UpperCamelCase__ : str = vocab_size
UpperCamelCase__ : List[str] = n_positions
UpperCamelCase__ : Tuple = n_layer
UpperCamelCase__ : Union[str, Any] = n_head
UpperCamelCase__ : Dict = n_inner
UpperCamelCase__ : int = activation_function
UpperCamelCase__ : List[str] = resid_pdrop
UpperCamelCase__ : Optional[int] = embd_pdrop
UpperCamelCase__ : Optional[Any] = attn_pdrop
UpperCamelCase__ : Optional[Any] = layer_norm_epsilon
UpperCamelCase__ : int = initializer_range
UpperCamelCase__ : Dict = scale_attn_weights
UpperCamelCase__ : Tuple = use_cache
UpperCamelCase__ : List[str] = scale_attn_by_inverse_layer_idx
UpperCamelCase__ : Union[str, Any] = reorder_and_upcast_attn
UpperCamelCase__ : Optional[Any] = bos_token_id
UpperCamelCase__ : Optional[int] = eos_token_id
super().__init__(bos_token_id=SCREAMING_SNAKE_CASE , eos_token_id=SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE )
| 196 | 1 |
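The config above maps generic attribute names to GPT-2-style fields through `attribute_map`. A short sketch of that aliasing, assuming `transformers` is installed:

```python
# Sketch of the `attribute_map` aliasing defined above, assuming
# `transformers` is installed: the generic names resolve to the
# GPT-2-style fields the config actually stores.
from transformers import DecisionTransformerConfig

config = DecisionTransformerConfig(n_layer=3, n_head=1)
assert config.num_hidden_layers == config.n_layer == 3
assert config.num_attention_heads == config.n_head == 1
assert config.max_position_embeddings == config.n_positions
```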
import argparse
import csv
import logging
import os
import random
import numpy as np
import torch
from torch.utils.data import DataLoader, RandomSampler, SequentialSampler, TensorDataset
from tqdm import tqdm, trange
from transformers import (
CONFIG_NAME,
WEIGHTS_NAME,
AdamW,
OpenAIGPTDoubleHeadsModel,
OpenAIGPTTokenizer,
get_linear_schedule_with_warmup,
)
logging.basicConfig(
format='%(asctime)s - %(levelname)s - %(name)s - %(message)s', datefmt='%m/%d/%Y %H:%M:%S', level=logging.INFO
)
snake_case__ : Any = logging.getLogger(__name__)
def _a ( lowerCamelCase: int , lowerCamelCase: Optional[Any] ) -> Tuple:
'''simple docstring'''
__A = np.argmax(lowerCamelCase , axis=1 )
return np.sum(outputs == labels )
def _a ( lowerCamelCase: Optional[int] ) -> Optional[int]:
'''simple docstring'''
with open(lowerCamelCase , encoding='''utf_8''' ) as f:
__A = csv.reader(lowerCamelCase )
__A = []
next(lowerCamelCase ) # skip the first line
for line in tqdm(lowerCamelCase ):
output.append((''' '''.join(line[1:5] ), line[5], line[6], int(line[-1] ) - 1) )
return output
def _a ( lowerCamelCase: str , lowerCamelCase: int , lowerCamelCase: Optional[int] , lowerCamelCase: List[str] , lowerCamelCase: Any , lowerCamelCase: Optional[int] ) -> int:
'''simple docstring'''
__A = []
for dataset in encoded_datasets:
__A = len(lowerCamelCase )
__A = np.zeros((n_batch, 2, input_len) , dtype=np.intaa )
__A = np.zeros((n_batch, 2) , dtype=np.intaa )
__A = np.full((n_batch, 2, input_len) , fill_value=-1_00 , dtype=np.intaa )
__A = np.zeros((n_batch,) , dtype=np.intaa )
for (
i,
(story, conta, conta, mc_label),
) in enumerate(lowerCamelCase ):
__A = [start_token] + story[:cap_length] + [delimiter_token] + conta[:cap_length] + [clf_token]
__A = [start_token] + story[:cap_length] + [delimiter_token] + conta[:cap_length] + [clf_token]
__A = with_conta
__A = with_conta
__A = len(lowerCamelCase ) - 1
__A = len(lowerCamelCase ) - 1
__A = with_conta
__A = with_conta
__A = mc_label
__A = (input_ids, mc_token_ids, lm_labels, mc_labels)
tensor_datasets.append(tuple(torch.tensor(lowerCamelCase ) for t in all_inputs ) )
return tensor_datasets
def _a ( ) -> Union[str, Any]:
'''simple docstring'''
__A = argparse.ArgumentParser()
parser.add_argument('''--model_name''' , type=lowerCamelCase , default='''openai-gpt''' , help='''pretrained model name''' )
parser.add_argument('''--do_train''' , action='''store_true''' , help='''Whether to run training.''' )
parser.add_argument('''--do_eval''' , action='''store_true''' , help='''Whether to run eval on the dev set.''' )
parser.add_argument(
'''--output_dir''' , default=lowerCamelCase , type=lowerCamelCase , required=lowerCamelCase , help='''The output directory where the model predictions and checkpoints will be written.''' , )
parser.add_argument('''--train_dataset''' , type=lowerCamelCase , default='''''' )
parser.add_argument('''--eval_dataset''' , type=lowerCamelCase , default='''''' )
parser.add_argument('''--seed''' , type=lowerCamelCase , default=42 )
parser.add_argument('''--num_train_epochs''' , type=lowerCamelCase , default=3 )
parser.add_argument('''--train_batch_size''' , type=lowerCamelCase , default=8 )
parser.add_argument('''--eval_batch_size''' , type=lowerCamelCase , default=16 )
parser.add_argument('''--adam_epsilon''' , default=1e-8 , type=lowerCamelCase , help='''Epsilon for Adam optimizer.''' )
parser.add_argument('''--max_grad_norm''' , type=lowerCamelCase , default=1 )
parser.add_argument(
'''--max_steps''' , default=-1 , type=lowerCamelCase , help=(
'''If > 0: set total number of training steps to perform. Override num_train_epochs.'''
) , )
parser.add_argument(
'''--gradient_accumulation_steps''' , type=lowerCamelCase , default=1 , help='''Number of updates steps to accumulate before performing a backward/update pass.''' , )
parser.add_argument('''--learning_rate''' , type=lowerCamelCase , default=6.25e-5 )
parser.add_argument('''--warmup_steps''' , default=0 , type=lowerCamelCase , help='''Linear warmup over warmup_steps.''' )
parser.add_argument('''--lr_schedule''' , type=lowerCamelCase , default='''warmup_linear''' )
parser.add_argument('''--weight_decay''' , type=lowerCamelCase , default=0.01 )
parser.add_argument('''--lm_coef''' , type=lowerCamelCase , default=0.9 )
parser.add_argument('''--n_valid''' , type=lowerCamelCase , default=3_74 )
parser.add_argument('''--server_ip''' , type=lowerCamelCase , default='''''' , help='''Can be used for distant debugging.''' )
parser.add_argument('''--server_port''' , type=lowerCamelCase , default='''''' , help='''Can be used for distant debugging.''' )
__A = parser.parse_args()
print(lowerCamelCase )
if args.server_ip and args.server_port:
# Distant debugging - see https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script
import ptvsd
print('''Waiting for debugger attach''' )
ptvsd.enable_attach(address=(args.server_ip, args.server_port) , redirect_output=lowerCamelCase )
ptvsd.wait_for_attach()
random.seed(args.seed )
np.random.seed(args.seed )
torch.manual_seed(args.seed )
torch.cuda.manual_seed_all(args.seed )
__A = torch.device('''cuda''' if torch.cuda.is_available() else '''cpu''' )
__A = torch.cuda.device_count()
logger.info('''device: {}, n_gpu {}'''.format(lowerCamelCase , lowerCamelCase ) )
if not args.do_train and not args.do_eval:
raise ValueError('''At least one of `do_train` or `do_eval` must be True.''' )
if not os.path.exists(args.output_dir ):
os.makedirs(args.output_dir )
# Load tokenizer and model
# This loading functions also add new tokens and embeddings called `special tokens`
# These new embeddings will be fine-tuned on the RocStories dataset
__A = ['''_start_''', '''_delimiter_''', '''_classify_''']
__A = OpenAIGPTTokenizer.from_pretrained(args.model_name )
tokenizer.add_tokens(lowerCamelCase )
__A = tokenizer.convert_tokens_to_ids(lowerCamelCase )
__A = OpenAIGPTDoubleHeadsModel.from_pretrained(args.model_name )
model.resize_token_embeddings(len(lowerCamelCase ) )
model.to(lowerCamelCase )
# Load and encode the datasets
def tokenize_and_encode(lowerCamelCase: str ):
if isinstance(lowerCamelCase , lowerCamelCase ):
return tokenizer.convert_tokens_to_ids(tokenizer.tokenize(lowerCamelCase ) )
elif isinstance(lowerCamelCase , lowerCamelCase ):
return obj
return [tokenize_and_encode(lowerCamelCase ) for o in obj]
logger.info('''Encoding dataset...''' )
__A = load_rocstories_dataset(args.train_dataset )
__A = load_rocstories_dataset(args.eval_dataset )
__A = (train_dataset, eval_dataset)
__A = tokenize_and_encode(lowerCamelCase )
# Compute the max input length for the Transformer
__A = model.config.n_positions // 2 - 2
__A = max(
len(story[:max_length] ) + max(len(conta[:max_length] ) , len(conta[:max_length] ) ) + 3
for dataset in encoded_datasets
for story, conta, conta, _ in dataset )
__A = min(lowerCamelCase , model.config.n_positions ) # Max size of input for the pre-trained model
# Prepare inputs tensors and dataloaders
__A = pre_process_datasets(lowerCamelCase , lowerCamelCase , lowerCamelCase , *lowerCamelCase )
__A , __A = tensor_datasets[0], tensor_datasets[1]
__A = TensorDataset(*lowerCamelCase )
__A = RandomSampler(lowerCamelCase )
__A = DataLoader(lowerCamelCase , sampler=lowerCamelCase , batch_size=args.train_batch_size )
__A = TensorDataset(*lowerCamelCase )
__A = SequentialSampler(lowerCamelCase )
__A = DataLoader(lowerCamelCase , sampler=lowerCamelCase , batch_size=args.eval_batch_size )
# Prepare optimizer
if args.do_train:
if args.max_steps > 0:
__A = args.max_steps
__A = args.max_steps // (len(lowerCamelCase ) // args.gradient_accumulation_steps) + 1
else:
__A = len(lowerCamelCase ) // args.gradient_accumulation_steps * args.num_train_epochs
__A = list(model.named_parameters() )
__A = ['''bias''', '''LayerNorm.bias''', '''LayerNorm.weight''']
__A = [
{
'''params''': [p for n, p in param_optimizer if not any(nd in n for nd in no_decay )],
'''weight_decay''': args.weight_decay,
},
{'''params''': [p for n, p in param_optimizer if any(nd in n for nd in no_decay )], '''weight_decay''': 0.0},
]
__A = AdamW(lowerCamelCase , lr=args.learning_rate , eps=args.adam_epsilon )
__A = get_linear_schedule_with_warmup(
lowerCamelCase , num_warmup_steps=args.warmup_steps , num_training_steps=lowerCamelCase )
if args.do_train:
__A , __A , __A = 0, 0, None
model.train()
for _ in trange(int(args.num_train_epochs ) , desc='''Epoch''' ):
__A = 0
__A = 0
__A = tqdm(lowerCamelCase , desc='''Training''' )
for step, batch in enumerate(lowerCamelCase ):
__A = tuple(t.to(lowerCamelCase ) for t in batch )
__A , __A , __A , __A = batch
__A = model(lowerCamelCase , mc_token_ids=lowerCamelCase , lm_labels=lowerCamelCase , mc_labels=lowerCamelCase )
__A = args.lm_coef * losses[0] + losses[1]
loss.backward()
optimizer.step()
scheduler.step()
optimizer.zero_grad()
tr_loss += loss.item()
__A = (
loss.item() if exp_average_loss is None else 0.7 * exp_average_loss + 0.3 * loss.item()
)
nb_tr_steps += 1
__A = '''Training loss: {:.2e} lr: {:.2e}'''.format(lowerCamelCase , scheduler.get_lr()[0] )
# Save a trained model
if args.do_train:
# Save a trained model, configuration and tokenizer
__A = model.module if hasattr(lowerCamelCase , '''module''' ) else model # Only save the model itself
# If we save using the predefined names, we can load using `from_pretrained`
__A = os.path.join(args.output_dir , lowerCamelCase )
__A = os.path.join(args.output_dir , lowerCamelCase )
torch.save(model_to_save.state_dict() , lowerCamelCase )
model_to_save.config.to_json_file(lowerCamelCase )
tokenizer.save_vocabulary(args.output_dir )
# Load a trained model and vocabulary that you have fine-tuned
__A = OpenAIGPTDoubleHeadsModel.from_pretrained(args.output_dir )
__A = OpenAIGPTTokenizer.from_pretrained(args.output_dir )
model.to(lowerCamelCase )
if args.do_eval:
model.eval()
__A , __A = 0, 0
__A , __A = 0, 0
for batch in tqdm(lowerCamelCase , desc='''Evaluating''' ):
__A = tuple(t.to(lowerCamelCase ) for t in batch )
__A , __A , __A , __A = batch
with torch.no_grad():
__A , __A , __A , __A = model(
lowerCamelCase , mc_token_ids=lowerCamelCase , lm_labels=lowerCamelCase , mc_labels=lowerCamelCase )
__A = mc_logits.detach().cpu().numpy()
__A = mc_labels.to('''cpu''' ).numpy()
__A = accuracy(lowerCamelCase , lowerCamelCase )
eval_loss += mc_loss.mean().item()
eval_accuracy += tmp_eval_accuracy
nb_eval_examples += input_ids.size(0 )
nb_eval_steps += 1
__A = eval_loss / nb_eval_steps
__A = eval_accuracy / nb_eval_examples
__A = tr_loss / nb_tr_steps if args.do_train else None
__A = {'''eval_loss''': eval_loss, '''eval_accuracy''': eval_accuracy, '''train_loss''': train_loss}
__A = os.path.join(args.output_dir , '''eval_results.txt''' )
with open(lowerCamelCase , '''w''' ) as writer:
logger.info('''***** Eval results *****''' )
for key in sorted(result.keys() ):
logger.info(''' %s = %s''' , lowerCamelCase , str(result[key] ) )
writer.write('''%s = %s\n''' % (key, str(result[key] )) )
if __name__ == "__main__":
main()
| 117 |
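In the fine-tuning script above, `pre_process_datasets` packs each story with its two candidate endings into fixed-frame sequences for the double-heads model. A self-contained sketch of that packing, with hypothetical token ids:

```python
# Sketch of the input packing `pre_process_datasets` performs above: each
# candidate ending is framed as [start] story [delim] ending [clf], and
# mc_token_ids marks the [clf] position read by the multiple-choice head.
# All token ids here are hypothetical placeholders.
START, DELIM, CLF = 9001, 9002, 9003

def pack(story_ids, ending_ids):
    return [START] + story_ids + [DELIM] + ending_ids + [CLF]

story = [5, 6, 7]
endings = [[11, 12], [21, 22, 23]]
inputs = [pack(story, ending) for ending in endings]
mc_token_ids = [len(seq) - 1 for seq in inputs]
print(inputs)        # [[9001, 5, 6, 7, 9002, 11, 12, 9003], ...]
print(mc_token_ids)  # [7, 8]
```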
from __future__ import annotations
from collections.abc import Iterable, Iterator
from dataclasses import dataclass
snake_case__ : Union[str, Any] = (3, 9, -11, 0, 7, 5, 1, -1)
snake_case__ : int = (4, 6, 2, 0, 8, 10, 3, -2)
@dataclass
class A_ :
lowerCAmelCase__ = 42
lowerCAmelCase__ = 42
class A_ :
def __init__(self :Dict , _UpperCamelCase :Iterable[int] )-> None:
__A = None
for i in sorted(_UpperCamelCase , reverse=_UpperCamelCase ):
__A = Node(_UpperCamelCase , self.head )
def __iter__(self :List[str] )-> Iterator[int]:
__A = self.head
while node:
yield node.data
__A = node.next_node
def __len__(self :Union[str, Any] )-> int:
return sum(1 for _ in self )
def __str__(self :List[Any] )-> str:
return " -> ".join([str(_UpperCamelCase ) for node in self] )
def _a ( lowerCamelCase: SortedLinkedList , lowerCamelCase: SortedLinkedList ) -> SortedLinkedList:
'''simple docstring'''
return SortedLinkedList(list(lowerCamelCase ) + list(lowerCamelCase ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
snake_case__ : Any = SortedLinkedList
print(merge_lists(SSL(test_data_odd), SSL(test_data_even)))
| 117 | 1 |
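Note that `merge_lists` above merges by concatenating both lists and re-sorting inside the constructor, which costs O((n+m) log(n+m)). Since both inputs are already sorted, the classic two-pointer merge achieves O(n+m); a sketch:

```python
# A linear-time alternative to the merge above (which concatenates and
# re-sorts): the classic two-pointer merge of two already-sorted lists.
def merge_sorted(a: list, b: list) -> list:
    i = j = 0
    out = []
    while i < len(a) and j < len(b):
        if a[i] <= b[j]:
            out.append(a[i])
            i += 1
        else:
            out.append(b[j])
            j += 1
    out.extend(a[i:])  # at most one of these tails is non-empty
    out.extend(b[j:])
    return out

print(merge_sorted([-11, -1, 0, 1, 3, 5, 7, 9], [-2, 0, 2, 3, 4, 6, 8, 10]))
```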
import unittest
import torch
from diffusers import DDIMScheduler, DDPMScheduler, UNetaDModel
from diffusers.training_utils import set_seed
from diffusers.utils.testing_utils import slow
UpperCAmelCase_ : Tuple = False
class UpperCamelCase ( unittest.TestCase ):
def __A ( self , UpperCAmelCase__=32 ):
set_seed(0 )
A__ = UNetaDModel(sample_size=UpperCAmelCase__ , in_channels=3 , out_channels=3 )
A__ = torch.optim.SGD(model.parameters() , lr=0.0_001 )
return model, optimizer
@slow
def __A ( self ):
A__ = "cpu" # ensure full determinism without setting the CUBLAS_WORKSPACE_CONFIG env variable
A__ = DDPMScheduler(
num_train_timesteps=1_000 , beta_start=0.0_001 , beta_end=0.02 , beta_schedule="linear" , clip_sample=UpperCAmelCase__ , )
A__ = DDIMScheduler(
num_train_timesteps=1_000 , beta_start=0.0_001 , beta_end=0.02 , beta_schedule="linear" , clip_sample=UpperCAmelCase__ , )
assert ddpm_scheduler.config.num_train_timesteps == ddim_scheduler.config.num_train_timesteps
# shared batches for DDPM and DDIM
set_seed(0 )
A__ = [torch.randn((4, 3, 32, 32) ).clip(-1 , 1 ).to(UpperCAmelCase__ ) for _ in range(4 )]
A__ = [torch.randn((4, 3, 32, 32) ).to(UpperCAmelCase__ ) for _ in range(4 )]
A__ = [torch.randint(0 , 1_000 , (4,) ).long().to(UpperCAmelCase__ ) for _ in range(4 )]
# train with a DDPM scheduler
A__ , A__ = self.get_model_optimizer(resolution=32 )
model.train().to(UpperCAmelCase__ )
for i in range(4 ):
optimizer.zero_grad()
A__ = ddpm_scheduler.add_noise(clean_images[i] , noise[i] , timesteps[i] )
A__ = model(UpperCAmelCase__ , timesteps[i] ).sample
A__ = torch.nn.functional.mse_loss(UpperCAmelCase__ , noise[i] )
loss.backward()
optimizer.step()
del model, optimizer
# recreate the model and optimizer, and retry with DDIM
A__ , A__ = self.get_model_optimizer(resolution=32 )
model.train().to(UpperCAmelCase__ )
for i in range(4 ):
optimizer.zero_grad()
A__ = ddim_scheduler.add_noise(clean_images[i] , noise[i] , timesteps[i] )
A__ = model(UpperCAmelCase__ , timesteps[i] ).sample
A__ = torch.nn.functional.mse_loss(UpperCAmelCase__ , noise[i] )
loss.backward()
optimizer.step()
del model, optimizer
self.assertTrue(torch.allclose(UpperCAmelCase__ , UpperCAmelCase__ , atol=1e-5 ) )
self.assertTrue(torch.allclose(UpperCAmelCase__ , UpperCAmelCase__ , atol=1e-5 ) )
| 352 |
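The test above expects DDPM and DDIM to yield identical training losses because both schedulers implement the same forward-noising rule in `add_noise`. A sketch of that closed form:

```python
# Sketch of the shared forward-noising rule that makes the DDPM and DDIM
# training losses above coincide:
#   x_t = sqrt(alpha_bar_t) * x_0 + sqrt(1 - alpha_bar_t) * noise
import torch

betas = torch.linspace(1e-4, 0.02, 1000)           # linear beta schedule
alphas_cumprod = torch.cumprod(1.0 - betas, dim=0)

def add_noise(x0, noise, t):
    a_bar = alphas_cumprod[t]
    return a_bar.sqrt() * x0 + (1.0 - a_bar).sqrt() * noise

x0 = torch.randn(4, 3, 32, 32).clip(-1, 1)
noise = torch.randn_like(x0)
x_t = add_noise(x0, noise, t=500)
print(x_t.shape)  # torch.Size([4, 3, 32, 32])
```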
import os
import zipfile
import requests
from get_ci_error_statistics import download_artifact, get_artifacts_links
def UpperCamelCase ( _A : List[Any] , _A : List[str]=7 )-> Optional[Any]:
"""simple docstring"""
A__ = None
if token is not None:
A__ = {"Accept": "application/vnd.github+json", "Authorization": f"""Bearer {token}"""}
# The id of a workflow (not of a workflow run)
A__ = "636036"
A__ = f"""https://api.github.com/repos/huggingface/transformers/actions/workflows/{workflow_id}/runs"""
# On `main` branch + event being `schedule` + not returning PRs + only `num_runs` results
url += f"""?branch=main&event=schedule&exclude_pull_requests=true&per_page={num_runs}"""
A__ = requests.get(_A , headers=_A ).json()
return result["workflow_runs"]
def UpperCamelCase ( _A : str )-> Dict:
"""simple docstring"""
A__ = get_daily_ci_runs(_A )
A__ = None
for workflow_run in workflow_runs:
if workflow_run["status"] == "completed":
A__ = workflow_run["id"]
break
return workflow_run_id
def UpperCamelCase ( _A : int , _A : List[str] , _A : str )-> Any:
"""simple docstring"""
A__ = get_last_daily_ci_runs(_A )
if workflow_run_id is not None:
A__ = get_artifacts_links(worflow_run_id=_A , token=_A )
for artifact_name in artifact_names:
if artifact_name in artifacts_links:
A__ = artifacts_links[artifact_name]
download_artifact(
artifact_name=_A , artifact_url=_A , output_dir=_A , token=_A )
def UpperCamelCase ( _A : Optional[Any] , _A : Any , _A : List[Any] )-> Optional[int]:
"""simple docstring"""
get_last_daily_ci_artifacts(_A , _A , _A )
A__ = {}
for artifact_name in artifact_names:
A__ = os.path.join(_A , f"""{artifact_name}.zip""" )
if os.path.isfile(_A ):
A__ = {}
with zipfile.ZipFile(_A ) as z:
for filename in z.namelist():
if not os.path.isdir(_A ):
# read the file
with z.open(_A ) as f:
A__ = f.read().decode("UTF-8" )
return results
| 198 | 0 |
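The inner loop above reads every artifact file out of a downloaded zip. A self-contained sketch of that step; note that inside an archive, directory entries are the names ending in "/":

```python
# Minimal sketch of the zip-reading step above: collect every regular
# file in an archive into a {filename: text} dict. Inside a zip,
# directory entries are names ending in "/". The path is hypothetical.
import zipfile

def read_zip_texts(path: str) -> dict:
    results = {}
    with zipfile.ZipFile(path) as z:
        for filename in z.namelist():
            if filename.endswith("/"):  # skip directory entries
                continue
            with z.open(filename) as f:
                results[filename] = f.read().decode("UTF-8")
    return results
```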
"""simple docstring"""
import unittest
import numpy as np
import torch
from diffusers import KarrasVePipeline, KarrasVeScheduler, UNetaDModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device
enable_full_determinism()
class _lowerCAmelCase ( unittest.TestCase ):
"""simple docstring"""
@property
def _lowercase ( self : Dict ):
torch.manual_seed(0 )
__lowercase = UNetaDModel(
block_out_channels=(3_2, 6_4), layers_per_block=2, sample_size=3_2, in_channels=3, out_channels=3, down_block_types=("DownBlock2D", "AttnDownBlock2D"), up_block_types=("AttnUpBlock2D", "UpBlock2D"), )
return model
def _lowercase ( self : List[str] ):
__lowercase = self.dummy_uncond_unet
__lowercase = KarrasVeScheduler()
__lowercase = KarrasVePipeline(unet=__lowerCamelCase, scheduler=__lowerCamelCase )
pipe.to(__lowerCamelCase )
pipe.set_progress_bar_config(disable=__lowerCamelCase )
__lowercase = torch.manual_seed(0 )
__lowercase = pipe(num_inference_steps=2, generator=__lowerCamelCase, output_type="numpy" ).images
__lowercase = torch.manual_seed(0 )
__lowercase = pipe(num_inference_steps=2, generator=__lowerCamelCase, output_type="numpy", return_dict=__lowerCamelCase )[0]
__lowercase = image[0, -3:, -3:, -1]
__lowercase = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 3_2, 3_2, 3)
__lowercase = np.array([0.0, 1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
@slow
@require_torch
class _lowerCAmelCase ( unittest.TestCase ):
"""simple docstring"""
def _lowercase ( self : Optional[Any] ):
__lowercase = "google/ncsnpp-celebahq-256"
__lowercase = UNetaDModel.from_pretrained(__lowerCamelCase )
__lowercase = KarrasVeScheduler()
__lowercase = KarrasVePipeline(unet=__lowerCamelCase, scheduler=__lowerCamelCase )
pipe.to(__lowerCamelCase )
pipe.set_progress_bar_config(disable=__lowerCamelCase )
__lowercase = torch.manual_seed(0 )
__lowercase = pipe(num_inference_steps=2_0, generator=__lowerCamelCase, output_type="numpy" ).images
__lowercase = image[0, -3:, -3:, -1]
assert image.shape == (1, 2_5_6, 2_5_6, 3)
__lowercase = np.array([0.578, 0.5_811, 0.5_924, 0.5_809, 0.587, 0.5_886, 0.5_861, 0.5_802, 0.586] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
| 17 |
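The slice comparisons above are only meaningful because `enable_full_determinism()` pins every source of randomness first. A simplified sketch of the usual knobs involved, as an illustration rather than the exact diffusers implementation:

```python
# Simplified sketch of what `enable_full_determinism` sets up above:
# seed every RNG and force deterministic kernels. A stand-in for
# illustration, not the exact diffusers implementation.
import os
import random

import numpy as np
import torch

def seed_everything(seed: int = 0) -> None:
    random.seed(seed)
    np.random.seed(seed)
    torch.manual_seed(seed)
    torch.cuda.manual_seed_all(seed)
    os.environ["CUBLAS_WORKSPACE_CONFIG"] = ":4096:8"
    torch.use_deterministic_algorithms(True)
    torch.backends.cudnn.deterministic = True
    torch.backends.cudnn.benchmark = False

seed_everything(0)
```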
from copy import deepcopy
import torch
import torch.nn.functional as F
from torch.optim import AdamW
from torch.optim.lr_scheduler import LambdaLR
from torch.utils.data import DataLoader
from accelerate.accelerator import Accelerator
from accelerate.state import GradientState
from accelerate.test_utils import RegressionDataset, RegressionModel
from accelerate.utils import DistributedType, is_torch_version, set_seed
def __magic_name__ ( A : Tuple, A : List[Any], A : List[Any], A : Dict ):
'''simple docstring'''
for param, grad_param in zip(model_a.parameters(), model_b.parameters() ):
if not param.requires_grad:
continue
if not did_step:
# Grads should not be in sync
assert (
torch.allclose(param.grad, grad_param.grad ) is False
), F"""Gradients in sync when they should not be at iteration {iteration}:\nmodel_a grad ({param.grad}) == model_b grad ({grad_param.grad})"""
else:
# Grads should be in sync
assert (
torch.allclose(param.grad, grad_param.grad ) is True
), F"""Gradients not in sync when they should be at iteration {iteration}:\nmodel_a grad ({param.grad}) != model_b grad ({grad_param.grad})"""
def __magic_name__ ( A : List[Any], A : int, A : Optional[Any], A : Optional[int], A : Any=True ):
'''simple docstring'''
model.train()
a = model(A )
a = F.mse_loss(A, target.to(output.device ) )
if not do_backward:
loss /= accelerator.gradient_accumulation_steps
loss.backward()
else:
accelerator.backward(A )
def __magic_name__ ( A : Any, A : Any=False ):
'''simple docstring'''
set_seed(42 )
a = RegressionModel()
a = deepcopy(A )
a = RegressionDataset(length=80 )
a = DataLoader(A, batch_size=16 )
model.to(accelerator.device )
if sched:
a = AdamW(params=model.parameters(), lr=1E-3 )
a = AdamW(params=ddp_model.parameters(), lr=1E-3 )
a = LambdaLR(A, lr_lambda=lambda epoch : epoch**0.65 )
a = LambdaLR(A, lr_lambda=lambda epoch : epoch**0.65 )
# Make a copy of `model`
if sched:
a , a , a , a = accelerator.prepare(A, A, A, A )
else:
a , a = accelerator.prepare(A, A )
if sched:
return (model, opt, sched, dataloader, ddp_model, ddp_opt, ddp_sched)
return model, ddp_model, dataloader
def __magic_name__ ( A : List[Any] ):
'''simple docstring'''
a , a , a = get_training_setup(A )
# Use a single batch
a , a = next(iter(A ) ).values()
for iteration in range(3 ):
# Gather the distributed inputs and targs for the base model
a , a = accelerator.gather((ddp_input, ddp_target) )
a , a = input.to(accelerator.device ), target.to(accelerator.device )
# Perform our initial ground truth step in non "DDP"
step_model(A, A, A, A )
# Do "gradient accumulation" (noop)
if iteration % 2 == 0:
# Accumulate grads locally
with accelerator.no_sync(A ):
step_model(A, A, A, A )
else:
# Sync grads
step_model(A, A, A, A )
# Since `no_sync` is a noop, `ddp_model` and `model` grads should always be in sync
check_model_parameters(A, A, A, A )
for param, ddp_param in zip(model.parameters(), ddp_model.parameters() ):
if not param.requires_grad:
continue
assert torch.allclose(
param.grad, ddp_param.grad ), F"""Gradients not in sync when they should be:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})"""
# Shuffle ddp_input on each iteration
torch.manual_seed(1337 + iteration )
a = ddp_input[torch.randperm(len(A ) )]
def __magic_name__ ( A : Optional[int] ):
'''simple docstring'''
a , a , a = get_training_setup(A )
# Use a single batch
a , a = next(iter(A ) ).values()
for iteration in range(3 ):
# Gather the distributed inputs and targs for the base model
a , a = accelerator.gather((ddp_input, ddp_target) )
a , a = input.to(accelerator.device ), target.to(accelerator.device )
# Perform our initial ground truth step in non "DDP"
step_model(A, A, A, A )
# Do "gradient accumulation" (noop)
if iteration % 2 == 0:
# Accumulate grads locally
with accelerator.no_sync(A ):
step_model(A, A, A, A )
else:
# Sync grads
step_model(A, A, A, A )
# DDP model and model should only be in sync when not (iteration % 2 == 0)
for param, ddp_param in zip(model.parameters(), ddp_model.parameters() ):
if not param.requires_grad:
continue
if iteration % 2 == 0:
# Grads should not be in sync
assert (
torch.allclose(param.grad, ddp_param.grad ) is False
), F"""Gradients in sync when they should not be:\nModel grad ({param.grad}) == DDP grad ({ddp_param.grad})"""
else:
# Grads should be in sync
assert (
torch.allclose(param.grad, ddp_param.grad ) is True
), F"""Gradients not in sync when they should be:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})"""
# Shuffle ddp_input on each iteration
torch.manual_seed(1337 + iteration )
a = ddp_input[torch.randperm(len(A ) )]
def __magic_name__ ( A : List[Any]=False, A : List[Any]=False ):
'''simple docstring'''
a = Accelerator(
split_batches=A, dispatch_batches=A, gradient_accumulation_steps=2 )
# Test that context manager behaves properly
a , a , a = get_training_setup(A )
for iteration, batch in enumerate(A ):
a , a = batch.values()
# Gather the distributed inputs and targs for the base model
a , a = accelerator.gather((ddp_input, ddp_target) )
a , a = input.to(accelerator.device ), target.to(accelerator.device )
# Perform our initial ground truth step in non "DDP"
step_model(A, A, A, A, A )
# Do "gradient accumulation" (noop)
with accelerator.accumulate(A ):
step_model(A, A, A, A )
# DDP model and model should only be in sync when not (iteration % 2 == 0)
for param, ddp_param in zip(model.parameters(), ddp_model.parameters() ):
if not param.requires_grad:
continue
if ((iteration + 1) % 2 == 0) or (iteration == len(A ) - 1):
# Grads should be in sync
assert (
torch.allclose(param.grad, ddp_param.grad ) is True
), F"""Gradients not in sync when they should be at iteration {iteration}:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})"""
else:
# Grads should not be in sync
assert (
torch.allclose(param.grad, ddp_param.grad ) is False
), F"""Gradients in sync when they should not be at iteration {iteration}:\nModel grad ({param.grad}) == DDP grad ({ddp_param.grad})"""
# Shuffle ddp_input on each iteration
torch.manual_seed(1337 + iteration )
a = ddp_input[torch.randperm(len(A ) )]
GradientState._reset_state()
def __magic_name__ ( A : List[Any]=False, A : Any=False ):
'''simple docstring'''
a = Accelerator(
split_batches=A, dispatch_batches=A, gradient_accumulation_steps=2 )
# Test that context manager behaves properly
a , a , a , a , a , a , a = get_training_setup(A, A )
for iteration, batch in enumerate(A ):
a , a = batch.values()
# Gather the distributed inputs and targs for the base model
a , a = accelerator.gather((ddp_input, ddp_target) )
a , a = input.to(accelerator.device ), target.to(accelerator.device )
# Perform our initial ground truth step in non "DDP"
model.train()
ddp_model.train()
step_model(A, A, A, A, A )
opt.step()
if ((iteration + 1) % 2 == 0) or ((iteration + 1) == len(A )):
if split_batches:
sched.step()
else:
for _ in range(accelerator.num_processes ):
sched.step()
opt.zero_grad()
# Perform gradient accumulation under wrapper
with accelerator.accumulate(A ):
step_model(A, A, A, A )
ddp_opt.step()
ddp_sched.step()
ddp_opt.zero_grad()
# Learning rates should be the same
assert (
opt.param_groups[0]["lr"] == ddp_opt.param_groups[0]["lr"]
), F"""Learning rates found in each optimizer did not align\nopt: {opt.param_groups[0]["lr"]}\nDDP opt: {ddp_opt.param_groups[0]["lr"]}\n"""
a = (((iteration + 1) % 2) == 0) or ((iteration + 1) == len(A ))
if accelerator.num_processes > 1:
check_model_parameters(A, A, A, A )
# Shuffle ddp_input on each iteration
torch.manual_seed(1337 + iteration )
GradientState._reset_state()
def __magic_name__ ( ):
'''simple docstring'''
a = Accelerator()
a = RegressionDataset(length=80 )
a = DataLoader(A, batch_size=16 )
a = RegressionDataset(length=96 )
a = DataLoader(A, batch_size=16 )
a , a = accelerator.prepare(A, A )
assert accelerator.gradient_state.active_dataloader is None
for iteration, _ in enumerate(A ):
assert id(accelerator.gradient_state.active_dataloader ) == id(A )
if iteration < len(A ) - 1:
assert not accelerator.gradient_state.end_of_dataloader
if iteration == 1:
for batch_num, _ in enumerate(A ):
assert id(accelerator.gradient_state.active_dataloader ) == id(A )
if batch_num < len(A ) - 1:
assert not accelerator.gradient_state.end_of_dataloader
else:
assert accelerator.gradient_state.end_of_dataloader
else:
assert accelerator.gradient_state.end_of_dataloader
assert accelerator.gradient_state.active_dataloader is None
def __magic_name__ ( ):
'''simple docstring'''
a = Accelerator()
a = accelerator.state
if state.local_process_index == 0:
print("**Test `accumulate` gradient accumulation with dataloader break**" )
test_dataloader_break()
if state.distributed_type == DistributedType.NO:
if state.local_process_index == 0:
print("**Test NOOP `no_sync` context manager**" )
test_noop_sync(A )
if state.distributed_type in (DistributedType.MULTI_GPU, DistributedType.MULTI_CPU):
if state.local_process_index == 0:
print("**Test Distributed `no_sync` context manager**" )
test_distributed_sync(A )
if state.distributed_type == DistributedType.MULTI_GPU:
for split_batch in [True, False]:
for dispatch_batches in [True, False]:
if state.local_process_index == 0:
print(
"**Test `accumulate` gradient accumulation, ", F"""`split_batches={split_batch}` and `dispatch_batches={dispatch_batches}`**""", )
test_gradient_accumulation(A, A )
# Currently will break on torch 2.0 +, need to investigate why
if is_torch_version("<", "2.0" ) or state.distributed_type == DistributedType.NO:
if state.local_process_index == 0:
print(
"**Test `accumulate` gradient accumulation with optimizer and scheduler, ", "`split_batches=False`, `dispatch_batches=False`**", )
test_gradient_accumulation_with_opt_and_scheduler()
if state.distributed_type == DistributedType.MULTI_GPU:
for split_batch in [True, False]:
for dispatch_batches in [True, False]:
if not split_batch and not dispatch_batches:
continue
if state.local_process_index == 0:
print(
"**Test `accumulate` gradient accumulation with optimizer and scheduler, ", F"""`split_batches={split_batch}` and `dispatch_batches={dispatch_batches}`**""", )
test_gradient_accumulation_with_opt_and_scheduler(A, A )
def __magic_name__ ( A : Optional[int] ):
'''simple docstring'''
main()
if __name__ == "__main__":
main()
| 107 | 0 |
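The tests above verify that `accelerator.accumulate` only syncs gradients and steps every `gradient_accumulation_steps` batches. The plain-PyTorch equivalent of that behavior, as a sketch:

```python
# Sketch of plain-PyTorch gradient accumulation, the behavior the
# `accelerator.accumulate` tests above verify: scale each loss by the
# accumulation factor and only step/zero every `accum_steps` batches.
import torch
import torch.nn.functional as F

model = torch.nn.Linear(1, 1)
optimizer = torch.optim.SGD(model.parameters(), lr=1e-3)
accum_steps = 2

batches = [(torch.randn(16, 1), torch.randn(16, 1)) for _ in range(8)]
for step, (x, y) in enumerate(batches):
    loss = F.mse_loss(model(x), y) / accum_steps  # keep the effective scale
    loss.backward()                               # grads add up across calls
    if (step + 1) % accum_steps == 0:
        optimizer.step()
        optimizer.zero_grad()
```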
from pathlib import Path
import fire
def lowerCamelCase__ ( snake_case_ : str , snake_case_ : str , snake_case_ : int ) -> None:
__snake_case = Path(snake_case_ )
__snake_case = Path(snake_case_ )
dest_dir.mkdir(exist_ok=snake_case_ )
for path in src_dir.iterdir():
__snake_case = [x.rstrip() for x in list(path.open().readlines() )][:n]
__snake_case = dest_dir.joinpath(path.name )
print(snake_case_ )
dest_path.open('''w''' ).write('''\n'''.join(snake_case_ ) )
if __name__ == "__main__":
fire.Fire(minify)
| 370 |
from typing import List, Optional, Tuple
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_herbert import HerbertTokenizer
snake_case_ = logging.get_logger(__name__)
snake_case_ = {'vocab_file': 'vocab.json', 'merges_file': 'merges.txt', 'tokenizer_file': 'tokenizer.json'}
snake_case_ = {
'vocab_file': {
'allegro/herbert-base-cased': 'https://huggingface.co/allegro/herbert-base-cased/resolve/main/vocab.json'
},
'merges_file': {
'allegro/herbert-base-cased': 'https://huggingface.co/allegro/herbert-base-cased/resolve/main/merges.txt'
},
}
snake_case_ = {'allegro/herbert-base-cased': 514}
snake_case_ = {}
class SCREAMING_SNAKE_CASE__ ( _UpperCAmelCase ):
A_ : Dict = VOCAB_FILES_NAMES
A_ : Optional[Any] = PRETRAINED_VOCAB_FILES_MAP
A_ : Optional[int] = PRETRAINED_INIT_CONFIGURATION
A_ : int = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
A_ : Any = HerbertTokenizer
def __init__(self : Dict , a__ : Tuple=None , a__ : Optional[int]=None , a__ : List[str]=None , a__ : Optional[int]="<s>" , a__ : Optional[Any]="<unk>" , a__ : Any="<pad>" , a__ : List[Any]="<mask>" , a__ : Any="</s>" , **a__ : Tuple , ):
"""simple docstring"""
super().__init__(
a__ , a__ , tokenizer_file=a__ , cls_token=a__ , unk_token=a__ , pad_token=a__ , mask_token=a__ , sep_token=a__ , **a__ , )
def a (self : List[str] , a__ : List[int] , a__ : Optional[List[int]] = None ):
"""simple docstring"""
__snake_case = [self.cls_token_id]
__snake_case = [self.sep_token_id]
if token_ids_a is None:
return cls + token_ids_a + sep
return cls + token_ids_a + sep + token_ids_a + sep
def a (self : List[str] , a__ : List[int] , a__ : Optional[List[int]] = None , a__ : bool = False ):
"""simple docstring"""
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=a__ , token_ids_a=a__ , already_has_special_tokens=a__ )
if token_ids_a is None:
return [1] + ([0] * len(a__ )) + [1]
return [1] + ([0] * len(a__ )) + [1] + ([0] * len(a__ )) + [1]
def a (self : Optional[int] , a__ : List[int] , a__ : Optional[List[int]] = None ):
"""simple docstring"""
__snake_case = [self.sep_token_id]
__snake_case = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def a (self : int , a__ : str , a__ : Optional[str] = None ):
"""simple docstring"""
__snake_case = self._tokenizer.model.save(a__ , name=a__ )
return tuple(a__ )
| 238 | 0 |
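# What the token-layout methods above compute, as plain Python; the id values
# are made up for illustration:
CLS, SEP = 0, 2

def build_inputs(ids_a, ids_b=None):
    # single sentence: <s> A </s>; sentence pair: <s> A </s> B </s>
    if ids_b is None:
        return [CLS] + ids_a + [SEP]
    return [CLS] + ids_a + [SEP] + ids_b + [SEP]

print(build_inputs([5, 6]))       # [0, 5, 6, 2]
print(build_inputs([5, 6], [7]))  # [0, 5, 6, 2, 7, 2]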
from __future__ import annotations
import math
from collections import Counter
from string import ascii_lowercase
def __lowerCamelCase ( lowerCamelCase__ ):
"""simple docstring"""
lowercase__ , lowercase__ : Union[str, Any] = analyze_text(lowerCamelCase__ )
lowercase__ : Any = list(" " + ascii_lowercase )
    # total number of characters counted, used to turn counts into probabilities
lowercase__ : str = sum(single_char_strings.values() )
    # accumulator for the one-character (first-order) entropy
lowercase__ : Optional[Any] = 0
    # for each character of the alphabet, add its entropy contribution if it occurs
for ch in my_alphas:
if ch in single_char_strings:
lowercase__ : Tuple = single_char_strings[ch]
lowercase__ : str = my_str / all_sum
my_fir_sum += prob * math.loga(lowerCamelCase__ ) # entropy formula.
# print entropy
print(F"""{round(-1 * my_fir_sum ):.1f}""" )
    # two-character (second-order) entropy
lowercase__ : Union[str, Any] = sum(two_char_strings.values() )
lowercase__ : Union[str, Any] = 0
    # for each two-character sequence over the alphabet, add its entropy contribution
for cha in my_alphas:
for cha in my_alphas:
lowercase__ : List[Any] = cha + cha
if sequence in two_char_strings:
lowercase__ : Optional[int] = two_char_strings[sequence]
lowercase__ : Optional[Any] = int(lowerCamelCase__ ) / all_sum
my_sec_sum += prob * math.loga(lowerCamelCase__ )
# print second entropy
print(F"""{round(-1 * my_sec_sum ):.1f}""" )
# print the difference between them
print(F"""{round((-1 * my_sec_sum) - (-1 * my_fir_sum) ):.1f}""" )
def __lowerCamelCase ( lowerCamelCase__ ):
"""simple docstring"""
lowercase__ : Tuple = Counter() # type: ignore
lowercase__ : List[str] = Counter() # type: ignore
single_char_strings[text[-1]] += 1
    # seed the boundary case: a space preceding the first character
two_char_strings[" " + text[0]] += 1
for i in range(0 , len(lowerCamelCase__ ) - 1 ):
single_char_strings[text[i]] += 1
two_char_strings[text[i : i + 2]] += 1
return single_char_strings, two_char_strings
def __lowerCamelCase ( ):
"""simple docstring"""
import doctest
doctest.testmod()
# text = (
# "Had repulsive dashwoods suspicion sincerity but advantage now him. Remark "
# "easily garret nor nay. Civil those mrs enjoy shy fat merry. You greatest "
# "jointure saw horrible. He private he on be imagine suppose. Fertile "
# "beloved evident through no service elderly is. Blind there if every no so "
# "at. Own neglected you preferred way sincerity delivered his attempted. To "
# "of message cottage windows do besides against uncivil. Delightful "
# "unreserved impossible few estimating men favourable see entreaties. She "
# "propriety immediate was improving. He or entrance humoured likewise "
# "moderate. Much nor game son say feel. Fat make met can must form into "
# "gate. Me we offending prevailed discovery. "
# )
# calculate_prob(text)
if __name__ == "__main__":
main()
| 130 |
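# Readable reference for the entropy snippet above: the first-order Shannon
# entropy is H1 = -sum(p(c) * log2(p(c))) over character frequencies, and the
# pair entropy is the same sum over two-character sequences. Minimal check:
import math
from collections import Counter

def first_order_entropy(text: str) -> float:
    counts = Counter(text)
    total = sum(counts.values())
    return -sum((c / total) * math.log2(c / total) for c in counts.values())

print(round(first_order_entropy("abab"), 1))  # 1.0 bit: two equally likely symbols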
import argparse
import intel_extension_for_pytorch as ipex
import torch
from diffusers import DPMSolverMultistepScheduler, StableDiffusionPipeline
lowerCAmelCase__ = argparse.ArgumentParser('''Stable Diffusion script with intel optimization''', add_help=False)
parser.add_argument('''--dpm''', action='''store_true''', help='''Enable DPMSolver or not''')
parser.add_argument('''--steps''', default=None, type=int, help='''Num inference steps''')
lowerCAmelCase__ = parser.parse_args()
lowerCAmelCase__ = '''cpu'''
lowerCAmelCase__ = '''a lovely <dicoo> in red dress and hat, in the snowy and bright night, with many bright buildings'''
lowerCAmelCase__ = '''path-to-your-trained-model'''
lowerCAmelCase__ = StableDiffusionPipeline.from_pretrained(model_id)
if args.dpm:
lowerCAmelCase__ = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
lowerCAmelCase__ = pipe.to(device)
# to channels last
lowerCAmelCase__ = pipe.unet.to(memory_format=torch.channels_last)
lowerCAmelCase__ = pipe.vae.to(memory_format=torch.channels_last)
lowerCAmelCase__ = pipe.text_encoder.to(memory_format=torch.channels_last)
if pipe.requires_safety_checker:
lowerCAmelCase__ = pipe.safety_checker.to(memory_format=torch.channels_last)
# optimize with ipex
lowerCAmelCase__ = torch.randn(2, 4, 6_4, 6_4)
lowerCAmelCase__ = torch.rand(1) * 9_9_9
lowerCAmelCase__ = torch.randn(2, 7_7, 7_6_8)
lowerCAmelCase__ = (sample, timestep, encoder_hidden_status)
try:
    lowerCAmelCase__ = ipex.optimize(pipe.unet.eval(), dtype=torch.bfloat16, inplace=True, sample_input=input_example)
except Exception:
    lowerCAmelCase__ = ipex.optimize(pipe.unet.eval(), dtype=torch.bfloat16, inplace=True)
lowerCAmelCase__ = ipex.optimize(pipe.vae.eval(), dtype=torch.bfloat16, inplace=True)
lowerCAmelCase__ = ipex.optimize(pipe.text_encoder.eval(), dtype=torch.bfloat16, inplace=True)
if pipe.requires_safety_checker:
    lowerCAmelCase__ = ipex.optimize(pipe.safety_checker.eval(), dtype=torch.bfloat16, inplace=True)
# compute
lowerCAmelCase__ = 6_6_6
lowerCAmelCase__ = torch.Generator(device).manual_seed(seed)
lowerCAmelCase__ = {'''generator''': generator}
if args.steps is not None:
lowerCAmelCase__ = args.steps
with torch.cpu.amp.autocast(enabled=True, dtype=torch.bfloat16):
lowerCAmelCase__ = pipe(prompt, **generate_kwargs).images[0]
# save image
image.save('''generated.png''')
| 130 | 1 |
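# The script above follows the usual IPEX CPU recipe: convert each submodule to
# channels-last memory format, then let ipex.optimize prepare it for bfloat16
# inference under torch.cpu.amp.autocast. The channels-last step alone is plain
# PyTorch:
import torch

x = torch.randn(2, 4, 64, 64).to(memory_format=torch.channels_last)
print(x.is_contiguous(memory_format=torch.channels_last))  # True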
from unittest import TestCase
from datasets import Sequence, Value
from datasets.arrow_dataset import Dataset
class lowerCamelCase (__lowerCamelCase ):
"""simple docstring"""
def A_ ( self : Optional[int] ) -> str:
"""simple docstring"""
return [
{"col_1": 3, "col_2": "a"},
{"col_1": 2, "col_2": "b"},
{"col_1": 1, "col_2": "c"},
{"col_1": 0, "col_2": "d"},
]
def A_ ( self : int ) -> Dict:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Dict = {"col_1": [3, 2, 1, 0], "col_2": ["a", "b", "c", "d"]}
return Dataset.from_dict(_UpperCAmelCase )
def A_ ( self : Dict ) -> Union[str, Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : List[str] = self._create_example_records()
SCREAMING_SNAKE_CASE__ : Union[str, Any] = Dataset.from_list(_UpperCAmelCase )
self.assertListEqual(dset.column_names, ["col_1", "col_2"] )
for i, r in enumerate(_UpperCAmelCase ):
self.assertDictEqual(_UpperCAmelCase, example_records[i] )
def A_ ( self : Tuple ) -> List[str]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Dict = self._create_example_records()
SCREAMING_SNAKE_CASE__ : Union[str, Any] = Dataset.from_list(_UpperCAmelCase )
SCREAMING_SNAKE_CASE__ : int = Dataset.from_dict({k: [r[k] for r in example_records] for k in example_records[0]} )
self.assertEqual(dset.info, dset_from_dict.info )
def A_ ( self : str ) -> Any: # checks what happens with missing columns
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : List[Any] = [{"col_1": 1}, {"col_2": "x"}]
SCREAMING_SNAKE_CASE__ : List[str] = Dataset.from_list(_UpperCAmelCase )
self.assertDictEqual(dset[0], {"col_1": 1} )
self.assertDictEqual(dset[1], {"col_1": None} ) # NB: first record is used for columns
def A_ ( self : Dict ) -> Optional[Any]: # checks if the type can be inferred from the second record
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Tuple = [{"col_1": []}, {"col_1": [1, 2]}]
SCREAMING_SNAKE_CASE__ : List[Any] = Dataset.from_list(_UpperCAmelCase )
self.assertEqual(dset.info.features["col_1"], Sequence(Value("int64" ) ) )
def A_ ( self : Dict ) -> str:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Union[str, Any] = Dataset.from_list([] )
self.assertEqual(len(_UpperCAmelCase ), 0 )
self.assertListEqual(dset.column_names, [] )
| 191 |
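# Plain-Python restatement of the missing-column behaviour asserted above:
# Dataset.from_list takes its columns from the first record and fills absent
# keys with None, i.e. column-wise:
records = [{"col_1": 1}, {"col_2": "x"}]
columns = {key: [record.get(key) for record in records] for key in records[0]}
print(columns)  # {'col_1': [1, None]}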
from sympy import diff, lambdify, symbols
from sympy.functions import * # noqa: F403
def _a ( SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : complex , SCREAMING_SNAKE_CASE__ : str = "x" , SCREAMING_SNAKE_CASE__ : float = 10**-10 , SCREAMING_SNAKE_CASE__ : int = 1 , ) -> complex:
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Union[str, Any] = symbols(SCREAMING_SNAKE_CASE__ )
SCREAMING_SNAKE_CASE__ : str = lambdify(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
SCREAMING_SNAKE_CASE__ : int = lambdify(SCREAMING_SNAKE_CASE__ , diff(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) )
SCREAMING_SNAKE_CASE__ : Any = starting_point
while True:
if diff_function(SCREAMING_SNAKE_CASE__ ) != 0:
SCREAMING_SNAKE_CASE__ : Any = prev_guess - multiplicity * func(SCREAMING_SNAKE_CASE__ ) / diff_function(
SCREAMING_SNAKE_CASE__ )
else:
raise ZeroDivisionError("Could not find root" ) from None
# Precision is checked by comparing the difference of consecutive guesses
if abs(next_guess - prev_guess ) < precision:
return next_guess
SCREAMING_SNAKE_CASE__ : Optional[int] = next_guess
# Let's Execute
if __name__ == "__main__":
# Find root of trigonometric function
# Find value of pi
print(f"The root of sin(x) = 0 is {newton_raphson('sin(x)', 2)}")
# Find root of polynomial
# Find fourth Root of 5
print(f"The root of x**4 - 5 = 0 is {newton_raphson('x**4 -5', 0.4 +5J)}")
# Find value of e
print(
'''The root of log(y) - 1 = 0 is ''',
f"{newton_raphson('log(y) - 1', 2, variable='y')}",
)
# Exponential Roots
print(
'''The root of exp(x) - 1 = 0 is''',
f"{newton_raphson('exp(x) - 1', 1_0, precision=0.005)}",
)
# Find root of cos(x)
print(f"The root of cos(x) = 0 is {newton_raphson('cos(x)', 0)}")
| 191 | 1 |
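# A self-contained variant of the sympy-based root finder above, taking plain
# callables instead of expression strings (names here are illustrative):
import math

def newton_raphson(f, df, x0, precision=1e-10, max_iter=100):
    x = x0
    for _ in range(max_iter):
        slope = df(x)
        if slope == 0:
            raise ZeroDivisionError("Could not find root")
        x_next = x - f(x) / slope
        if abs(x_next - x) < precision:  # converged to the requested precision
            return x_next
        x = x_next
    return x

print(newton_raphson(math.sin, math.cos, 2))  # ~3.141592653589793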
import io
import json
import unittest
from parameterized import parameterized
from transformers import FSMTForConditionalGeneration, FSMTTokenizer
from transformers.testing_utils import get_tests_dir, require_torch, slow, torch_device
from utils import calculate_bleu
_snake_case = get_tests_dir() + '''/test_data/fsmt/fsmt_val_data.json'''
with io.open(filename, '''r''', encoding='''utf-8''') as f:
_snake_case = json.load(f)
@require_torch
class _snake_case ( unittest.TestCase ):
def _lowerCamelCase ( self: Optional[Any] , __lowerCamelCase: Dict ) -> str:
return FSMTTokenizer.from_pretrained(__lowerCamelCase )
def _lowerCamelCase ( self: Union[str, Any] , __lowerCamelCase: Dict ) -> Any:
__UpperCAmelCase : Dict = FSMTForConditionalGeneration.from_pretrained(__lowerCamelCase ).to(__lowerCamelCase )
if torch_device == "cuda":
model.half()
return model
@parameterized.expand(
[
["en-ru", 26.0],
["ru-en", 22.0],
["en-de", 22.0],
["de-en", 29.0],
] )
@slow
def _lowerCamelCase ( self: List[str] , __lowerCamelCase: Union[str, Any] , __lowerCamelCase: Tuple ) -> Optional[Any]:
        # note: this test does not measure peak performance, since it only evaluates a small batch,
        # but it should be enough to detect a regression in the output quality
__UpperCAmelCase : Any = f'''facebook/wmt19-{pair}'''
__UpperCAmelCase : Dict = self.get_tokenizer(__lowerCamelCase )
__UpperCAmelCase : Union[str, Any] = self.get_model(__lowerCamelCase )
__UpperCAmelCase : Tuple = bleu_data[pair]["src"]
__UpperCAmelCase : Optional[Any] = bleu_data[pair]["tgt"]
__UpperCAmelCase : Optional[Any] = tokenizer(__lowerCamelCase , return_tensors="pt" , truncation=__lowerCamelCase , padding="longest" ).to(__lowerCamelCase )
__UpperCAmelCase : Optional[int] = model.generate(
input_ids=batch.input_ids , num_beams=8 , )
__UpperCAmelCase : List[str] = tokenizer.batch_decode(
__lowerCamelCase , skip_special_tokens=__lowerCamelCase , clean_up_tokenization_spaces=__lowerCamelCase )
__UpperCAmelCase : Any = calculate_bleu(__lowerCamelCase , __lowerCamelCase )
print(__lowerCamelCase )
self.assertGreaterEqual(scores["bleu"] , __lowerCamelCase )
| 157 |
import numpy as np
def _UpperCamelCase ( snake_case__ ) -> np.ndarray:
return 1 / (1 + np.exp(-vector ))
def _UpperCamelCase ( snake_case__ ) -> np.ndarray:
return vector * sigmoid(snake_case__ )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 157 | 1 |
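# Quick numeric check of the snippet above: the first function is the logistic
# sigmoid 1 / (1 + exp(-x)); the second, x * sigmoid(x), is the swish/SiLU
# activation.
import numpy as np

x = np.array([-1.0, 0.0, 1.0])
sig = 1 / (1 + np.exp(-x))
print(sig)      # [0.26894142 0.5        0.73105858]
print(x * sig)  # [-0.26894142  0.          0.73105858]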
from collections import defaultdict
from math import ceil, sqrt
def __lowerCamelCase ( UpperCamelCase__ = 1000000 , UpperCamelCase__ = 10 ):
'''simple docstring'''
snake_case_ = defaultdict(UpperCamelCase__ )
for outer_width in range(3 , (t_limit // 4) + 2 ):
if outer_width * outer_width > t_limit:
snake_case_ = max(
ceil(sqrt(outer_width * outer_width - t_limit ) ) , 1 )
else:
snake_case_ = 1
hole_width_lower_bound += (outer_width - hole_width_lower_bound) % 2
for hole_width in range(UpperCamelCase__ , outer_width - 1 , 2 ):
count[outer_width * outer_width - hole_width * hole_width] += 1
return sum(1 for n in count.values() if 1 <= n <= 10 )
if __name__ == "__main__":
print(F'''{solution() = }''')
| 200 |
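# The snippet above counts hollow square laminae: an outer square of side w with
# a centred hole of side h (same parity, h >= 1) uses w*w - h*h tiles, and the
# answer is how many tile budgets t <= t_limit are achievable in 1 to 10
# distinct ways. A slower but transparent cross-check (illustrative):
from collections import defaultdict

def solution_bruteforce(t_limit: int = 1000) -> int:
    count = defaultdict(int)
    for outer in range(3, t_limit):
        for hole in range(outer - 2, 0, -2):  # tile count grows as the hole shrinks
            tiles = outer * outer - hole * hole
            if tiles > t_limit:
                break
            count[tiles] += 1
    return sum(1 for n in count.values() if 1 <= n <= 10)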
import requests
_UpperCAmelCase : Union[str, Any] = """https://newsapi.org/v1/articles?source=bbc-news&sortBy=top&apiKey="""
def __lowerCamelCase ( UpperCamelCase__ ):
'''simple docstring'''
snake_case_ = requests.get(_NEWS_API + bbc_news_api_key ).json()
# each article in the list is a dict
for i, article in enumerate(bbc_news_page['articles'] , 1 ):
print(F'''{i}.) {article["title"]}''' )
if __name__ == "__main__":
fetch_bbc_news(bbc_news_api_key="""<Your BBC News API key goes here>""")
| 200 | 1 |
"""simple docstring"""
from __future__ import annotations
import unittest
from transformers import EsmConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import numpy
import tensorflow as tf
from transformers.models.esm.modeling_tf_esm import (
TF_ESM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFEsmForMaskedLM,
TFEsmForSequenceClassification,
TFEsmForTokenClassification,
TFEsmModel,
)
class _SCREAMING_SNAKE_CASE :
def __init__( self , __A , ) -> Any:
lowerCAmelCase_ :int = parent
lowerCAmelCase_ :Tuple = 13
lowerCAmelCase_ :Optional[Any] = 7
lowerCAmelCase_ :List[str] = True
lowerCAmelCase_ :Union[str, Any] = True
lowerCAmelCase_ :Tuple = True
lowerCAmelCase_ :int = 99
lowerCAmelCase_ :Optional[Any] = 32
lowerCAmelCase_ :Optional[int] = 2
lowerCAmelCase_ :Optional[Any] = 4
lowerCAmelCase_ :Any = 37
lowerCAmelCase_ :List[Any] = """gelu"""
lowerCAmelCase_ :Optional[Any] = 0.1
lowerCAmelCase_ :Dict = 0.1
lowerCAmelCase_ :Union[str, Any] = 512
lowerCAmelCase_ :Union[str, Any] = 16
lowerCAmelCase_ :Optional[int] = 2
lowerCAmelCase_ :str = 0.0_2
lowerCAmelCase_ :List[Any] = 3
lowerCAmelCase_ :Union[str, Any] = 4
lowerCAmelCase_ :int = None
def __lowerCAmelCase ( self ) -> int:
lowerCAmelCase_ :str = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
lowerCAmelCase_ :Any = None
if self.use_input_mask:
lowerCAmelCase_ :Union[str, Any] = random_attention_mask([self.batch_size, self.seq_length] )
lowerCAmelCase_ :Tuple = None
lowerCAmelCase_ :Tuple = None
lowerCAmelCase_ :Optional[Any] = None
if self.use_labels:
lowerCAmelCase_ :Union[str, Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
lowerCAmelCase_ :Optional[int] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
lowerCAmelCase_ :str = ids_tensor([self.batch_size] , self.num_choices )
lowerCAmelCase_ :Optional[Any] = EsmConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , pad_token_id=1 , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , )
return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
def __lowerCAmelCase ( self ) -> List[str]:
        lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ = self.prepare_config_and_inputs()
lowerCAmelCase_ :Union[str, Any] = True
lowerCAmelCase_ :Any = floats_tensor([self.batch_size, self.seq_length, self.hidden_size] )
lowerCAmelCase_ :int = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 )
return (
config,
input_ids,
input_mask,
sequence_labels,
token_labels,
choice_labels,
encoder_hidden_states,
encoder_attention_mask,
)
def __lowerCAmelCase ( self , __A , __A , __A , __A , __A , __A ) -> int:
lowerCAmelCase_ :Optional[Any] = TFEsmModel(config=__A )
lowerCAmelCase_ :Tuple = {"""input_ids""": input_ids, """attention_mask""": input_mask}
lowerCAmelCase_ :List[str] = model(__A )
lowerCAmelCase_ :Union[str, Any] = [input_ids, input_mask]
lowerCAmelCase_ :int = model(__A )
lowerCAmelCase_ :int = model(__A )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def __lowerCAmelCase ( self , __A , __A , __A , __A , __A , __A , __A , __A , ) -> Any:
lowerCAmelCase_ :Optional[int] = True
lowerCAmelCase_ :Tuple = TFEsmModel(config=__A )
lowerCAmelCase_ :List[Any] = {
"""input_ids""": input_ids,
"""attention_mask""": input_mask,
"""encoder_hidden_states""": encoder_hidden_states,
"""encoder_attention_mask""": encoder_attention_mask,
}
lowerCAmelCase_ :Dict = model(__A )
lowerCAmelCase_ :Optional[int] = [input_ids, input_mask]
lowerCAmelCase_ :Optional[Any] = model(__A , encoder_hidden_states=__A )
# Also check the case where encoder outputs are not passed
lowerCAmelCase_ :int = model(__A , attention_mask=__A )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def __lowerCAmelCase ( self , __A , __A , __A , __A , __A , __A ) -> Any:
lowerCAmelCase_ :Optional[int] = TFEsmForMaskedLM(config=__A )
lowerCAmelCase_ :Tuple = model([input_ids, input_mask] )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def __lowerCAmelCase ( self , __A , __A , __A , __A , __A , __A ) -> Optional[int]:
lowerCAmelCase_ :Dict = self.num_labels
lowerCAmelCase_ :List[str] = TFEsmForTokenClassification(config=__A )
lowerCAmelCase_ :str = {"""input_ids""": input_ids, """attention_mask""": input_mask}
lowerCAmelCase_ :Tuple = model(__A )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def __lowerCAmelCase ( self ) -> int:
lowerCAmelCase_ :List[Any] = self.prepare_config_and_inputs()
        lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ = config_and_inputs
lowerCAmelCase_ :Tuple = {"""input_ids""": input_ids, """attention_mask""": input_mask}
return config, inputs_dict
@require_tf
class _SCREAMING_SNAKE_CASE ( A__ , A__ , unittest.TestCase ):
UpperCAmelCase_ :Any = (
(
TFEsmModel,
TFEsmForMaskedLM,
TFEsmForSequenceClassification,
TFEsmForTokenClassification,
)
if is_tf_available()
else ()
)
UpperCAmelCase_ :int = (
{
"feature-extraction": TFEsmModel,
"fill-mask": TFEsmForMaskedLM,
"text-classification": TFEsmForSequenceClassification,
"token-classification": TFEsmForTokenClassification,
"zero-shot": TFEsmForSequenceClassification,
}
if is_tf_available()
else {}
)
UpperCAmelCase_ :Any = False
UpperCAmelCase_ :List[str] = False
def __lowerCAmelCase ( self ) -> List[Any]:
lowerCAmelCase_ :str = TFEsmModelTester(self )
lowerCAmelCase_ :List[Any] = ConfigTester(self , config_class=__A , hidden_size=37 )
def __lowerCAmelCase ( self ) -> Optional[int]:
self.config_tester.run_common_tests()
def __lowerCAmelCase ( self ) -> Optional[Any]:
lowerCAmelCase_ :str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__A )
def __lowerCAmelCase ( self ) -> List[str]:
lowerCAmelCase_ :Dict = self.model_tester.prepare_config_and_inputs_for_decoder()
self.model_tester.create_and_check_model_as_decoder(*__A )
def __lowerCAmelCase ( self ) -> Optional[Any]:
lowerCAmelCase_ :Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*__A )
def __lowerCAmelCase ( self ) -> List[str]:
lowerCAmelCase_ :Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*__A )
@slow
def __lowerCAmelCase ( self ) -> Optional[int]:
for model_name in TF_ESM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowerCAmelCase_ :Union[str, Any] = TFEsmModel.from_pretrained(__A )
self.assertIsNotNone(__A )
@unittest.skip("""Protein models do not support embedding resizing.""" )
def __lowerCAmelCase ( self ) -> str:
pass
@unittest.skip("""Protein models do not support embedding resizing.""" )
def __lowerCAmelCase ( self ) -> Optional[Any]:
pass
def __lowerCAmelCase ( self ) -> str:
lowerCAmelCase_ , lowerCAmelCase_ :List[str] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCAmelCase_ :Union[str, Any] = model_class(__A )
assert isinstance(model.get_input_embeddings() , tf.keras.layers.Layer )
if model_class is TFEsmForMaskedLM:
# Output embedding test differs from the main test because they're a matrix, not a layer
lowerCAmelCase_ :Optional[int] = model.get_bias()
assert isinstance(__A , __A )
for k, v in name.items():
assert isinstance(__A , tf.Variable )
else:
lowerCAmelCase_ :Tuple = model.get_output_embeddings()
assert x is None
lowerCAmelCase_ :Any = model.get_bias()
assert name is None
@require_tf
class _SCREAMING_SNAKE_CASE ( unittest.TestCase ):
@slow
def __lowerCAmelCase ( self ) -> List[Any]:
lowerCAmelCase_ :Any = TFEsmForMaskedLM.from_pretrained("""facebook/esm2_t6_8M_UR50D""" )
lowerCAmelCase_ :Tuple = tf.constant([[0, 1, 2, 3, 4, 5]] )
lowerCAmelCase_ :str = model(__A )[0]
lowerCAmelCase_ :str = [1, 6, 33]
self.assertEqual(list(output.numpy().shape ) , __A )
# compare the actual values for a slice.
lowerCAmelCase_ :int = tf.constant(
[
[
[8.9_2_1_5_1_8, -1_0.5_8_9_8_1_4, -6.4_6_7_1_3_0_7],
[-6.3_9_6_7_1_5_6, -1_3.9_1_1_3_7_7, -1.1_2_1_1_9_1_5],
[-7.7_8_1_2_4_7, -1_3.9_5_1_5_5_7, -3.7_4_0_5_9_2],
]
] )
self.assertTrue(numpy.allclose(output[:, :3, :3].numpy() , expected_slice.numpy() , atol=1E-2 ) )
@slow
def __lowerCAmelCase ( self ) -> int:
lowerCAmelCase_ :List[str] = TFEsmModel.from_pretrained("""facebook/esm2_t6_8M_UR50D""" )
lowerCAmelCase_ :Dict = tf.constant([[0, 6, 4, 13, 5, 4, 16, 12, 11, 7, 2]] )
lowerCAmelCase_ :Tuple = model(__A )[0]
# compare the actual values for a slice.
lowerCAmelCase_ :int = tf.constant(
[
[
[0.1_4_4_4_3_0_9_2, 0.5_4_1_2_5_3_2_7, 0.3_2_4_7_7_3_9],
[0.3_0_3_4_0_4_8_4, 0.0_0_5_2_6_6_7_6, 0.3_1_0_7_7_7_2_2],
[0.3_2_2_7_8_0_4_3, -0.2_4_9_8_7_0_9_6, 0.3_4_1_4_6_2_8],
]
] )
self.assertTrue(numpy.allclose(output[:, :3, :3].numpy() , expected_slice.numpy() , atol=1E-4 ) )
| 84 |
"""simple docstring"""
import json
import os
import unittest
from transformers.models.biogpt.tokenization_biogpt import VOCAB_FILES_NAMES, BioGptTokenizer
from transformers.testing_utils import slow
from ...test_tokenization_common import TokenizerTesterMixin
class _SCREAMING_SNAKE_CASE ( A__ , unittest.TestCase ):
UpperCAmelCase_ :Any = BioGptTokenizer
UpperCAmelCase_ :str = False
def __lowerCAmelCase ( self ) -> List[Any]:
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
lowerCAmelCase_ :Optional[Any] = [
"""l""",
"""o""",
"""w""",
"""e""",
"""r""",
"""s""",
"""t""",
"""i""",
"""d""",
"""n""",
"""w</w>""",
"""r</w>""",
"""t</w>""",
"""lo""",
"""low""",
"""er</w>""",
"""low</w>""",
"""lowest</w>""",
"""newer</w>""",
"""wider</w>""",
"""<unk>""",
]
lowerCAmelCase_ :str = dict(zip(__A , range(len(__A ) ) ) )
lowerCAmelCase_ :int = ["""l o 123""", """lo w 1456""", """e r</w> 1789""", """"""]
lowerCAmelCase_ :Optional[int] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] )
lowerCAmelCase_ :Dict = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""merges_file"""] )
with open(self.vocab_file , """w""" ) as fp:
fp.write(json.dumps(__A ) )
with open(self.merges_file , """w""" ) as fp:
fp.write("""\n""".join(__A ) )
def __lowerCAmelCase ( self , __A ) -> Optional[int]:
lowerCAmelCase_ :List[Any] = """lower newer"""
lowerCAmelCase_ :Tuple = """lower newer"""
return input_text, output_text
def __lowerCAmelCase ( self ) -> str:
lowerCAmelCase_ :List[str] = BioGptTokenizer(self.vocab_file , self.merges_file )
lowerCAmelCase_ :Union[str, Any] = """lower"""
lowerCAmelCase_ :Any = ["""low""", """er</w>"""]
lowerCAmelCase_ :Union[str, Any] = tokenizer.tokenize(__A )
self.assertListEqual(__A , __A )
lowerCAmelCase_ :Dict = tokens + ["""<unk>"""]
lowerCAmelCase_ :List[str] = [14, 15, 20]
self.assertListEqual(tokenizer.convert_tokens_to_ids(__A ) , __A )
@slow
def __lowerCAmelCase ( self ) -> List[Any]:
lowerCAmelCase_ :Optional[Any] = BioGptTokenizer.from_pretrained("""microsoft/biogpt""" )
lowerCAmelCase_ :List[Any] = tokenizer.encode("""sequence builders""" , add_special_tokens=__A )
lowerCAmelCase_ :List[str] = tokenizer.encode("""multi-sequence build""" , add_special_tokens=__A )
lowerCAmelCase_ :Optional[int] = tokenizer.build_inputs_with_special_tokens(__A )
lowerCAmelCase_ :List[str] = tokenizer.build_inputs_with_special_tokens(__A , __A )
self.assertTrue(encoded_sentence == [2] + text )
self.assertTrue(encoded_pair == [2] + text + [2] + text_a )
| 84 | 1 |
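# In the BPE fixture above, "</w>" marks an end-of-word token, so "lower"
# tokenizes to ["low", "er</w>"]; detokenization strips the marker and restores
# the following space:
tokens = ["low", "er</w>", "new", "er</w>"]
text = "".join(t[:-4] + " " if t.endswith("</w>") else t for t in tokens).strip()
print(text)  # lower newer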
'''simple docstring'''
import os
import pytest
import yaml
from datasets.features.features import Features, Value
from datasets.info import DatasetInfo, DatasetInfosDict
@pytest.mark.parametrize(
'''files''' , [
['''full:README.md''', '''dataset_infos.json'''],
['''empty:README.md''', '''dataset_infos.json'''],
['''dataset_infos.json'''],
['''full:README.md'''],
] , )
def snake_case_ ( __SCREAMING_SNAKE_CASE : List[Any] , __SCREAMING_SNAKE_CASE : List[str] ):
"""simple docstring"""
lowercase_ : Union[str, Any] = tmp_path_factory.mktemp('''dset_infos_dir''' )
if "full:README.md" in files:
with open(dataset_infos_dir / '''README.md''' , '''w''' ) as f:
f.write('''---\ndataset_info:\n dataset_size: 42\n---''' )
if "empty:README.md" in files:
with open(dataset_infos_dir / '''README.md''' , '''w''' ) as f:
f.write('''''' )
# we want to support dataset_infos.json for backward compatibility
if "dataset_infos.json" in files:
with open(dataset_infos_dir / '''dataset_infos.json''' , '''w''' ) as f:
f.write('''{"default": {"dataset_size": 42}}''' )
lowercase_ : Optional[Any] = DatasetInfosDict.from_directory(__SCREAMING_SNAKE_CASE )
assert dataset_infos
assert dataset_infos["default"].dataset_size == 42
@pytest.mark.parametrize(
'''dataset_info''' , [
DatasetInfo(),
DatasetInfo(
description='''foo''' , features=Features({'''a''': Value('''int32''' )} ) , builder_name='''builder''' , config_name='''config''' , version='''1.0.0''' , splits=[{'''name''': '''train'''}] , download_size=42 , ),
] , )
def snake_case_ ( __SCREAMING_SNAKE_CASE : Optional[Any] , __SCREAMING_SNAKE_CASE : Any ):
"""simple docstring"""
lowercase_ : Tuple = str(__SCREAMING_SNAKE_CASE )
dataset_info.write_to_directory(__SCREAMING_SNAKE_CASE )
lowercase_ : List[str] = DatasetInfo.from_directory(__SCREAMING_SNAKE_CASE )
assert dataset_info == reloaded
assert os.path.exists(os.path.join(__SCREAMING_SNAKE_CASE , '''dataset_info.json''' ) )
def snake_case_ ( ):
"""simple docstring"""
lowercase_ : Tuple = DatasetInfo(
description='''foo''' , citation='''bar''' , homepage='''https://foo.bar''' , license='''CC0''' , features=Features({'''a''': Value('''int32''' )} ) , post_processed={} , supervised_keys=() , task_templates=[] , builder_name='''builder''' , config_name='''config''' , version='''1.0.0''' , splits=[{'''name''': '''train''', '''num_examples''': 42}] , download_checksums={} , download_size=1337 , post_processing_size=442 , dataset_size=1234 , size_in_bytes=1337 + 442 + 1234 , )
lowercase_ : Tuple = dataset_info._to_yaml_dict()
assert sorted(__SCREAMING_SNAKE_CASE ) == sorted(DatasetInfo._INCLUDED_INFO_IN_YAML )
for key in DatasetInfo._INCLUDED_INFO_IN_YAML:
assert key in dataset_info_yaml_dict
assert isinstance(dataset_info_yaml_dict[key] , (list, dict, int, str) )
lowercase_ : int = yaml.safe_dump(__SCREAMING_SNAKE_CASE )
lowercase_ : List[str] = yaml.safe_load(__SCREAMING_SNAKE_CASE )
assert dataset_info_yaml_dict == reloaded
def snake_case_ ( ):
"""simple docstring"""
lowercase_ : Any = DatasetInfo()
lowercase_ : int = dataset_info._to_yaml_dict()
assert dataset_info_yaml_dict == {}
@pytest.mark.parametrize(
'''dataset_infos_dict''' , [
DatasetInfosDict(),
DatasetInfosDict({'''default''': DatasetInfo()} ),
DatasetInfosDict({'''my_config_name''': DatasetInfo()} ),
DatasetInfosDict(
{
'''default''': DatasetInfo(
description='''foo''' , features=Features({'''a''': Value('''int32''' )} ) , builder_name='''builder''' , config_name='''config''' , version='''1.0.0''' , splits=[{'''name''': '''train'''}] , download_size=42 , )
} ),
DatasetInfosDict(
{
'''v1''': DatasetInfo(dataset_size=42 ),
'''v2''': DatasetInfo(dataset_size=1337 ),
} ),
] , )
def snake_case_ ( __SCREAMING_SNAKE_CASE : List[str] , __SCREAMING_SNAKE_CASE : Optional[Any] ):
"""simple docstring"""
lowercase_ : int = str(__SCREAMING_SNAKE_CASE )
dataset_infos_dict.write_to_directory(__SCREAMING_SNAKE_CASE )
lowercase_ : Any = DatasetInfosDict.from_directory(__SCREAMING_SNAKE_CASE )
    # the config_name keys of the dataset_infos_dict take precedence over the attribute
for config_name, dataset_info in dataset_infos_dict.items():
lowercase_ : Tuple = config_name
# the yaml representation doesn't include fields like description or citation
# so we just test that we can recover what we can from the yaml
lowercase_ : int = DatasetInfo._from_yaml_dict(dataset_info._to_yaml_dict() )
assert dataset_infos_dict == reloaded
if dataset_infos_dict:
assert os.path.exists(os.path.join(__SCREAMING_SNAKE_CASE , '''README.md''' ) )
| 360 |
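# The round-trip the tests above depend on, in miniature: a plain dict survives
# a YAML dump/load unchanged (requires PyYAML, as imported in the snippet):
import yaml

info = {"dataset_size": 42, "splits": [{"name": "train", "num_examples": 42}]}
assert yaml.safe_load(yaml.safe_dump(info)) == info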
'''simple docstring'''
from typing import Optional
from urllib.parse import quote
import huggingface_hub as hfh
from packaging import version
def snake_case_ ( __SCREAMING_SNAKE_CASE : str , __SCREAMING_SNAKE_CASE : str , __SCREAMING_SNAKE_CASE : Optional[str] = None ):
"""simple docstring"""
if version.parse(hfh.__version__ ).release < version.parse('''0.11.0''' ).release:
# old versions of hfh don't url-encode the file path
lowercase_ : int = quote(__SCREAMING_SNAKE_CASE )
return hfh.hf_hub_url(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , repo_type='''dataset''' , revision=__SCREAMING_SNAKE_CASE )
| 264 | 0 |
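# The version gate above exists because old huggingface_hub releases did not
# URL-encode the file path; urllib's quote performs that encoding explicitly:
from urllib.parse import quote

print(quote("data/train file.json"))  # data/train%20file.json ('/' is kept by default)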
'''simple docstring'''
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import convert_to_rgb, normalize, rescale, resize, to_channel_dimension_format
from ...image_utils import (
OPENAI_CLIP_MEAN,
OPENAI_CLIP_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
__a: List[str] = logging.get_logger(__name__)
class UpperCAmelCase ( a__ ):
'''simple docstring'''
SCREAMING_SNAKE_CASE = ["pixel_values"]
def __init__( self , __lowerCAmelCase = True , __lowerCAmelCase = None , __lowerCAmelCase = PILImageResampling.BICUBIC , __lowerCAmelCase = True , __lowerCAmelCase = 1 / 255 , __lowerCAmelCase = True , __lowerCAmelCase = None , __lowerCAmelCase = None , __lowerCAmelCase = True , **__lowerCAmelCase , ) -> None:
super().__init__(**__lowerCAmelCase )
lowercase__ : Optional[int] = size if size is not None else {'''height''': 384, '''width''': 384}
lowercase__ : Optional[Any] = get_size_dict(__lowerCAmelCase , default_to_square=__lowerCAmelCase )
lowercase__ : Dict = do_resize
lowercase__ : int = size
lowercase__ : int = resample
lowercase__ : Tuple = do_rescale
lowercase__ : int = rescale_factor
lowercase__ : int = do_normalize
lowercase__ : Optional[int] = image_mean if image_mean is not None else OPENAI_CLIP_MEAN
lowercase__ : Optional[int] = image_std if image_std is not None else OPENAI_CLIP_STD
lowercase__ : Tuple = do_convert_rgb
def _lowerCAmelCase( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = PILImageResampling.BICUBIC , __lowerCAmelCase = None , **__lowerCAmelCase , ) -> np.ndarray:
lowercase__ : Union[str, Any] = get_size_dict(__lowerCAmelCase , default_to_square=__lowerCAmelCase )
if "height" not in size or "width" not in size:
raise ValueError(F"""The `size` dictionary must contain the keys `height` and `width`. Got {size.keys()}""" )
lowercase__ : Any = (size['''height'''], size['''width'''])
return resize(__lowerCAmelCase , size=__lowerCAmelCase , resample=__lowerCAmelCase , data_format=__lowerCAmelCase , **__lowerCAmelCase )
def _lowerCAmelCase( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = None , **__lowerCAmelCase , ) -> str:
return rescale(__lowerCAmelCase , scale=__lowerCAmelCase , data_format=__lowerCAmelCase , **__lowerCAmelCase )
def _lowerCAmelCase( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = None , **__lowerCAmelCase , ) -> np.ndarray:
return normalize(__lowerCAmelCase , mean=__lowerCAmelCase , std=__lowerCAmelCase , data_format=__lowerCAmelCase , **__lowerCAmelCase )
def _lowerCAmelCase( self , __lowerCAmelCase , __lowerCAmelCase = None , __lowerCAmelCase = None , __lowerCAmelCase = None , __lowerCAmelCase = None , __lowerCAmelCase = None , __lowerCAmelCase = None , __lowerCAmelCase = None , __lowerCAmelCase = None , __lowerCAmelCase = None , __lowerCAmelCase = None , __lowerCAmelCase = ChannelDimension.FIRST , **__lowerCAmelCase , ) -> PIL.Image.Image:
lowercase__ : Union[str, Any] = do_resize if do_resize is not None else self.do_resize
lowercase__ : Any = resample if resample is not None else self.resample
lowercase__ : int = do_rescale if do_rescale is not None else self.do_rescale
lowercase__ : List[Any] = rescale_factor if rescale_factor is not None else self.rescale_factor
lowercase__ : List[Any] = do_normalize if do_normalize is not None else self.do_normalize
lowercase__ : Dict = image_mean if image_mean is not None else self.image_mean
lowercase__ : Dict = image_std if image_std is not None else self.image_std
lowercase__ : Dict = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb
lowercase__ : Optional[int] = size if size is not None else self.size
lowercase__ : int = get_size_dict(__lowerCAmelCase , default_to_square=__lowerCAmelCase )
lowercase__ : str = make_list_of_images(__lowerCAmelCase )
if not valid_images(__lowerCAmelCase ):
raise ValueError(
'''Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '''
'''torch.Tensor, tf.Tensor or jax.ndarray.''' )
        if do_resize and (size is None or resample is None):
raise ValueError('''Size and resample must be specified if do_resize is True.''' )
if do_rescale and rescale_factor is None:
raise ValueError('''Rescale factor must be specified if do_rescale is True.''' )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError('''Image mean and std must be specified if do_normalize is True.''' )
# PIL RGBA images are converted to RGB
if do_convert_rgb:
lowercase__ : Optional[Any] = [convert_to_rgb(__lowerCAmelCase ) for image in images]
# All transformations expect numpy arrays.
lowercase__ : Any = [to_numpy_array(__lowerCAmelCase ) for image in images]
if do_resize:
lowercase__ : Tuple = [self.resize(image=__lowerCAmelCase , size=__lowerCAmelCase , resample=__lowerCAmelCase ) for image in images]
if do_rescale:
lowercase__ : List[str] = [self.rescale(image=__lowerCAmelCase , scale=__lowerCAmelCase ) for image in images]
if do_normalize:
lowercase__ : Tuple = [self.normalize(image=__lowerCAmelCase , mean=__lowerCAmelCase , std=__lowerCAmelCase ) for image in images]
lowercase__ : List[str] = [to_channel_dimension_format(__lowerCAmelCase , __lowerCAmelCase ) for image in images]
lowercase__ : Optional[Any] = BatchFeature(data={'''pixel_values''': images} , tensor_type=__lowerCAmelCase )
return encoded_outputs
| 198 |
'''simple docstring'''
import json
import os
import unittest
from transformers.models.ctrl.tokenization_ctrl import VOCAB_FILES_NAMES, CTRLTokenizer
from ...test_tokenization_common import TokenizerTesterMixin
class UpperCAmelCase ( a__ , unittest.TestCase ):
'''simple docstring'''
SCREAMING_SNAKE_CASE = CTRLTokenizer
SCREAMING_SNAKE_CASE = False
SCREAMING_SNAKE_CASE = False
def _lowerCAmelCase( self ) -> Dict:
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
lowercase__ : Optional[Any] = ['''adapt''', '''re@@''', '''a@@''', '''apt''', '''c@@''', '''t''', '''<unk>''']
lowercase__ : str = dict(zip(__lowerCAmelCase , range(len(__lowerCAmelCase ) ) ) )
lowercase__ : Tuple = ['''#version: 0.2''', '''a p''', '''ap t</w>''', '''r e''', '''a d''', '''ad apt</w>''', '''''']
lowercase__ : Optional[Any] = {'''unk_token''': '''<unk>'''}
lowercase__ : Dict = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
lowercase__ : Dict = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''merges_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as fp:
fp.write(json.dumps(__lowerCAmelCase ) + '''\n''' )
with open(self.merges_file , '''w''' , encoding='''utf-8''' ) as fp:
fp.write('''\n'''.join(__lowerCAmelCase ) )
def _lowerCAmelCase( self , **__lowerCAmelCase ) -> List[str]:
kwargs.update(self.special_tokens_map )
return CTRLTokenizer.from_pretrained(self.tmpdirname , **__lowerCAmelCase )
def _lowerCAmelCase( self , __lowerCAmelCase ) -> List[str]:
lowercase__ : List[str] = '''adapt react readapt apt'''
lowercase__ : Union[str, Any] = '''adapt react readapt apt'''
return input_text, output_text
def _lowerCAmelCase( self ) -> Optional[Any]:
lowercase__ : Union[str, Any] = CTRLTokenizer(self.vocab_file , self.merges_file , **self.special_tokens_map )
lowercase__ : Optional[Any] = '''adapt react readapt apt'''
lowercase__ : Dict = '''adapt re@@ a@@ c@@ t re@@ adapt apt'''.split()
lowercase__ : Union[str, Any] = tokenizer.tokenize(__lowerCAmelCase )
self.assertListEqual(__lowerCAmelCase , __lowerCAmelCase )
lowercase__ : int = tokens + [tokenizer.unk_token]
lowercase__ : List[str] = [0, 1, 2, 4, 5, 1, 0, 3, 6]
self.assertListEqual(tokenizer.convert_tokens_to_ids(__lowerCAmelCase ) , __lowerCAmelCase )
| 198 | 1 |
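# In the CTRL fixture above, a trailing "@@" means the subword continues into
# the next token, so detokenization strips "@@" and only inserts spaces after
# plain tokens:
tokens = ["adapt", "re@@", "a@@", "c@@", "t", "re@@", "adapt", "apt"]
text = "".join(t[:-2] if t.endswith("@@") else t + " " for t in tokens).strip()
print(text)  # adapt react readapt apt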
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
_a : Union[str, Any] = {
"""configuration_resnet""": ["""RESNET_PRETRAINED_CONFIG_ARCHIVE_MAP""", """ResNetConfig""", """ResNetOnnxConfig"""]
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_a : Optional[int] = [
"""RESNET_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""ResNetForImageClassification""",
"""ResNetModel""",
"""ResNetPreTrainedModel""",
"""ResNetBackbone""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_a : Optional[Any] = [
"""TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFResNetForImageClassification""",
"""TFResNetModel""",
"""TFResNetPreTrainedModel""",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_a : Optional[Any] = [
"""FlaxResNetForImageClassification""",
"""FlaxResNetModel""",
"""FlaxResNetPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_resnet import RESNET_PRETRAINED_CONFIG_ARCHIVE_MAP, ResNetConfig, ResNetOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_resnet import (
RESNET_PRETRAINED_MODEL_ARCHIVE_LIST,
ResNetBackbone,
ResNetForImageClassification,
ResNetModel,
ResNetPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_resnet import (
TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST,
TFResNetForImageClassification,
TFResNetModel,
TFResNetPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_resnet import FlaxResNetForImageClassification, FlaxResNetModel, FlaxResNetPreTrainedModel
else:
import sys
_a : Tuple = _LazyModule(__name__, globals()["""__file__"""], _import_structure)
| 354 |
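# Standalone sketch of the lazy-import pattern used above: exported names are
# declared up front and the defining module is imported only on first attribute
# access. An illustrative miniature, not the transformers _LazyModule:
import importlib
import types

class LazyModule(types.ModuleType):
    def __init__(self, name: str, import_structure: dict):
        super().__init__(name)
        # map each exported name to the module that defines it
        self._name_to_module = {
            attr: module for module, attrs in import_structure.items() for attr in attrs
        }

    def __getattr__(self, name: str):
        if name not in self._name_to_module:
            raise AttributeError(name)
        return getattr(importlib.import_module(self._name_to_module[name]), name)

lazy = LazyModule("demo", {"math": ["sqrt"]})
print(lazy.sqrt(9.0))  # 3.0 -- "math" is imported only on this first access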
'''simple docstring'''
import os
import shutil
from pathlib import Path
from typing import Optional, Union
import numpy as np
from huggingface_hub import hf_hub_download
from ..utils import ONNX_EXTERNAL_WEIGHTS_NAME, ONNX_WEIGHTS_NAME, is_onnx_available, logging
if is_onnx_available():
import onnxruntime as ort
_a : List[Any] = logging.get_logger(__name__)
_a : Union[str, Any] = {
    """tensor(bool)""": np.bool_,
    """tensor(int8)""": np.int8,
    """tensor(uint8)""": np.uint8,
    """tensor(int16)""": np.int16,
    """tensor(uint16)""": np.uint16,
    """tensor(int32)""": np.int32,
    """tensor(uint32)""": np.uint32,
    """tensor(int64)""": np.int64,
    """tensor(uint64)""": np.uint64,
    """tensor(float16)""": np.float16,
    """tensor(float)""": np.float32,
    """tensor(double)""": np.float64,
}
class _UpperCAmelCase :
def __init__( self,__SCREAMING_SNAKE_CASE=None,**__SCREAMING_SNAKE_CASE ):
'''simple docstring'''
logger.info("""`diffusers.OnnxRuntimeModel` is experimental and might change in the future.""" )
__lowerCAmelCase = model
__lowerCAmelCase = kwargs.get("""model_save_dir""",__SCREAMING_SNAKE_CASE )
__lowerCAmelCase = kwargs.get("""latest_model_name""",__SCREAMING_SNAKE_CASE )
def __call__( self,**__SCREAMING_SNAKE_CASE ):
'''simple docstring'''
__lowerCAmelCase = {k: np.array(__SCREAMING_SNAKE_CASE ) for k, v in kwargs.items()}
return self.model.run(__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE )
@staticmethod
def lowerCamelCase__ ( __SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE=None,__SCREAMING_SNAKE_CASE=None ):
'''simple docstring'''
if provider is None:
logger.info("""No onnxruntime provider specified, using CPUExecutionProvider""" )
__lowerCAmelCase = """CPUExecutionProvider"""
return ort.InferenceSession(__SCREAMING_SNAKE_CASE,providers=[provider],sess_options=__SCREAMING_SNAKE_CASE )
def lowerCamelCase__ ( self,__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE = None,**__SCREAMING_SNAKE_CASE ):
'''simple docstring'''
__lowerCAmelCase = file_name if file_name is not None else ONNX_WEIGHTS_NAME
__lowerCAmelCase = self.model_save_dir.joinpath(self.latest_model_name )
__lowerCAmelCase = Path(__SCREAMING_SNAKE_CASE ).joinpath(__SCREAMING_SNAKE_CASE )
try:
shutil.copyfile(__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE )
except shutil.SameFileError:
pass
# copy external weights (for models >2GB)
__lowerCAmelCase = self.model_save_dir.joinpath(__SCREAMING_SNAKE_CASE )
if src_path.exists():
__lowerCAmelCase = Path(__SCREAMING_SNAKE_CASE ).joinpath(__SCREAMING_SNAKE_CASE )
try:
shutil.copyfile(__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE )
except shutil.SameFileError:
pass
def lowerCamelCase__ ( self,__SCREAMING_SNAKE_CASE,**__SCREAMING_SNAKE_CASE,):
'''simple docstring'''
if os.path.isfile(__SCREAMING_SNAKE_CASE ):
logger.error(f'Provided path ({save_directory}) should be a directory, not a file' )
return
os.makedirs(__SCREAMING_SNAKE_CASE,exist_ok=__SCREAMING_SNAKE_CASE )
# saving model weights/files
self._save_pretrained(__SCREAMING_SNAKE_CASE,**__SCREAMING_SNAKE_CASE )
@classmethod
def lowerCamelCase__ ( cls,__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE = None,__SCREAMING_SNAKE_CASE = None,__SCREAMING_SNAKE_CASE = False,__SCREAMING_SNAKE_CASE = None,__SCREAMING_SNAKE_CASE = None,__SCREAMING_SNAKE_CASE = None,__SCREAMING_SNAKE_CASE = None,**__SCREAMING_SNAKE_CASE,):
'''simple docstring'''
__lowerCAmelCase = file_name if file_name is not None else ONNX_WEIGHTS_NAME
# load model from local directory
if os.path.isdir(__SCREAMING_SNAKE_CASE ):
__lowerCAmelCase = OnnxRuntimeModel.load_model(
os.path.join(__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE ),provider=__SCREAMING_SNAKE_CASE,sess_options=__SCREAMING_SNAKE_CASE )
__lowerCAmelCase = Path(__SCREAMING_SNAKE_CASE )
# load model from hub
else:
# download model
__lowerCAmelCase = hf_hub_download(
repo_id=__SCREAMING_SNAKE_CASE,filename=__SCREAMING_SNAKE_CASE,use_auth_token=__SCREAMING_SNAKE_CASE,revision=__SCREAMING_SNAKE_CASE,cache_dir=__SCREAMING_SNAKE_CASE,force_download=__SCREAMING_SNAKE_CASE,)
__lowerCAmelCase = Path(__SCREAMING_SNAKE_CASE ).parent
__lowerCAmelCase = Path(__SCREAMING_SNAKE_CASE ).name
__lowerCAmelCase = OnnxRuntimeModel.load_model(__SCREAMING_SNAKE_CASE,provider=__SCREAMING_SNAKE_CASE,sess_options=__SCREAMING_SNAKE_CASE )
return cls(model=__SCREAMING_SNAKE_CASE,**__SCREAMING_SNAKE_CASE )
@classmethod
def lowerCamelCase__ ( cls,__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE = True,__SCREAMING_SNAKE_CASE = None,__SCREAMING_SNAKE_CASE = None,**__SCREAMING_SNAKE_CASE,):
'''simple docstring'''
__lowerCAmelCase = None
if len(str(__SCREAMING_SNAKE_CASE ).split("""@""" ) ) == 2:
__lowerCAmelCase , __lowerCAmelCase = model_id.split("""@""" )
return cls._from_pretrained(
model_id=__SCREAMING_SNAKE_CASE,revision=__SCREAMING_SNAKE_CASE,cache_dir=__SCREAMING_SNAKE_CASE,force_download=__SCREAMING_SNAKE_CASE,use_auth_token=__SCREAMING_SNAKE_CASE,**__SCREAMING_SNAKE_CASE,)
| 46 | 0 |
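# What __call__ in the wrapper above does before invoking onnxruntime: coerce
# every keyword argument to a numpy array. That coercion in isolation:
import numpy as np

kwargs = {"sample": [[0.0, 1.0]], "timestep": 5}
feeds = {k: np.array(v) for k, v in kwargs.items()}
print({k: v.shape for k, v in feeds.items()})  # {'sample': (1, 2), 'timestep': ()}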
"""simple docstring"""
import tempfile
import torch
from diffusers import PNDMScheduler
from .test_schedulers import SchedulerCommonTest
class _lowerCamelCase ( __snake_case ):
_lowerCamelCase :Any = (PNDMScheduler,)
_lowerCamelCase :int = (("num_inference_steps", 50),)
def _lowerCAmelCase ( self : List[Any] , **UpperCamelCase : int ) -> Tuple:
"""simple docstring"""
lowerCAmelCase__ : Tuple = {
'''num_train_timesteps''': 10_00,
'''beta_start''': 0.0001,
'''beta_end''': 0.02,
'''beta_schedule''': '''linear''',
}
config.update(**a_ )
return config
def _lowerCAmelCase ( self : Dict , UpperCamelCase : List[Any]=0 , **UpperCamelCase : Optional[Any] ) -> Dict:
"""simple docstring"""
lowerCAmelCase__ : Dict = dict(self.forward_default_kwargs )
lowerCAmelCase__ : str = kwargs.pop("""num_inference_steps""" , a_ )
lowerCAmelCase__ : str = self.dummy_sample
lowerCAmelCase__ : Any = 0.1 * sample
lowerCAmelCase__ : List[Any] = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
for scheduler_class in self.scheduler_classes:
lowerCAmelCase__ : Any = self.get_scheduler_config(**a_ )
lowerCAmelCase__ : int = scheduler_class(**a_ )
scheduler.set_timesteps(a_ )
# copy over dummy past residuals
lowerCAmelCase__ : Tuple = dummy_past_residuals[:]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(a_ )
lowerCAmelCase__ : List[Any] = scheduler_class.from_pretrained(a_ )
new_scheduler.set_timesteps(a_ )
# copy over dummy past residuals
lowerCAmelCase__ : Any = dummy_past_residuals[:]
lowerCAmelCase__ : int = scheduler.step_prk(a_ , a_ , a_ , **a_ ).prev_sample
lowerCAmelCase__ : Union[str, Any] = new_scheduler.step_prk(a_ , a_ , a_ , **a_ ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical"
lowerCAmelCase__ : Tuple = scheduler.step_plms(a_ , a_ , a_ , **a_ ).prev_sample
lowerCAmelCase__ : Tuple = new_scheduler.step_plms(a_ , a_ , a_ , **a_ ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical"
def _lowerCAmelCase ( self : str ) -> Union[str, Any]:
"""simple docstring"""
pass
def _lowerCAmelCase ( self : Dict , UpperCamelCase : List[Any]=0 , **UpperCamelCase : str ) -> Union[str, Any]:
"""simple docstring"""
lowerCAmelCase__ : int = dict(self.forward_default_kwargs )
lowerCAmelCase__ : Optional[Any] = kwargs.pop("""num_inference_steps""" , a_ )
lowerCAmelCase__ : List[Any] = self.dummy_sample
lowerCAmelCase__ : List[Any] = 0.1 * sample
lowerCAmelCase__ : List[str] = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
for scheduler_class in self.scheduler_classes:
lowerCAmelCase__ : str = self.get_scheduler_config()
lowerCAmelCase__ : int = scheduler_class(**a_ )
scheduler.set_timesteps(a_ )
# copy over dummy past residuals (must be after setting timesteps)
lowerCAmelCase__ : List[str] = dummy_past_residuals[:]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(a_ )
lowerCAmelCase__ : Any = scheduler_class.from_pretrained(a_ )
# copy over dummy past residuals
new_scheduler.set_timesteps(a_ )
# copy over dummy past residual (must be after setting timesteps)
lowerCAmelCase__ : str = dummy_past_residuals[:]
lowerCAmelCase__ : Any = scheduler.step_prk(a_ , a_ , a_ , **a_ ).prev_sample
lowerCAmelCase__ : Dict = new_scheduler.step_prk(a_ , a_ , a_ , **a_ ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical"
lowerCAmelCase__ : List[Any] = scheduler.step_plms(a_ , a_ , a_ , **a_ ).prev_sample
lowerCAmelCase__ : Union[str, Any] = new_scheduler.step_plms(a_ , a_ , a_ , **a_ ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical"
def _lowerCAmelCase ( self : List[str] , **UpperCamelCase : List[Any] ) -> int:
"""simple docstring"""
lowerCAmelCase__ : List[str] = self.scheduler_classes[0]
lowerCAmelCase__ : List[Any] = self.get_scheduler_config(**a_ )
lowerCAmelCase__ : Tuple = scheduler_class(**a_ )
lowerCAmelCase__ : List[Any] = 10
lowerCAmelCase__ : List[str] = self.dummy_model()
lowerCAmelCase__ : Any = self.dummy_sample_deter
scheduler.set_timesteps(a_ )
for i, t in enumerate(scheduler.prk_timesteps ):
lowerCAmelCase__ : int = model(a_ , a_ )
lowerCAmelCase__ : Optional[int] = scheduler.step_prk(a_ , a_ , a_ ).prev_sample
for i, t in enumerate(scheduler.plms_timesteps ):
lowerCAmelCase__ : int = model(a_ , a_ )
lowerCAmelCase__ : Optional[int] = scheduler.step_plms(a_ , a_ , a_ ).prev_sample
return sample
def _lowerCAmelCase ( self : Optional[int] ) -> Optional[int]:
"""simple docstring"""
lowerCAmelCase__ : List[Any] = dict(self.forward_default_kwargs )
lowerCAmelCase__ : Any = kwargs.pop("""num_inference_steps""" , a_ )
for scheduler_class in self.scheduler_classes:
lowerCAmelCase__ : Optional[Any] = self.get_scheduler_config()
lowerCAmelCase__ : Dict = scheduler_class(**a_ )
lowerCAmelCase__ : List[Any] = self.dummy_sample
lowerCAmelCase__ : List[Any] = 0.1 * sample
if num_inference_steps is not None and hasattr(a_ , """set_timesteps""" ):
scheduler.set_timesteps(a_ )
elif num_inference_steps is not None and not hasattr(a_ , """set_timesteps""" ):
lowerCAmelCase__ : List[str] = num_inference_steps
# copy over dummy past residuals (must be done after set_timesteps)
lowerCAmelCase__ : Union[str, Any] = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
lowerCAmelCase__ : List[Any] = dummy_past_residuals[:]
lowerCAmelCase__ : str = scheduler.step_prk(a_ , 0 , a_ , **a_ ).prev_sample
lowerCAmelCase__ : Dict = scheduler.step_prk(a_ , 1 , a_ , **a_ ).prev_sample
self.assertEqual(output_a.shape , sample.shape )
self.assertEqual(output_a.shape , output_a.shape )
lowerCAmelCase__ : Tuple = scheduler.step_plms(a_ , 0 , a_ , **a_ ).prev_sample
lowerCAmelCase__ : List[str] = scheduler.step_plms(a_ , 1 , a_ , **a_ ).prev_sample
self.assertEqual(output_a.shape , sample.shape )
self.assertEqual(output_a.shape , output_a.shape )
def _lowerCAmelCase ( self : Optional[Any] ) -> int:
"""simple docstring"""
for timesteps in [1_00, 10_00]:
self.check_over_configs(num_train_timesteps=a_ )
def _lowerCAmelCase ( self : Any ) -> Tuple:
"""simple docstring"""
for steps_offset in [0, 1]:
self.check_over_configs(steps_offset=a_ )
lowerCAmelCase__ : List[str] = self.scheduler_classes[0]
lowerCAmelCase__ : Tuple = self.get_scheduler_config(steps_offset=1 )
lowerCAmelCase__ : int = scheduler_class(**a_ )
scheduler.set_timesteps(10 )
assert torch.equal(
scheduler.timesteps , torch.LongTensor(
[9_01, 8_51, 8_51, 8_01, 8_01, 7_51, 7_51, 7_01, 7_01, 6_51, 6_51, 6_01, 6_01, 5_01, 4_01, 3_01, 2_01, 1_01, 1] ) , )
def _lowerCAmelCase ( self : List[Any] ) -> Tuple:
"""simple docstring"""
for beta_start, beta_end in zip([0.0001, 0.001] , [0.002, 0.02] ):
self.check_over_configs(beta_start=a_ , beta_end=a_ )
def _lowerCAmelCase ( self : Optional[int] ) -> Tuple:
"""simple docstring"""
for schedule in ["linear", "squaredcos_cap_v2"]:
self.check_over_configs(beta_schedule=a_ )
def _lowerCAmelCase ( self : int ) -> List[str]:
"""simple docstring"""
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(prediction_type=a_ )
def _lowerCAmelCase ( self : int ) -> Optional[int]:
"""simple docstring"""
for t in [1, 5, 10]:
self.check_over_forward(time_step=a_ )
def _lowerCAmelCase ( self : Dict ) -> List[str]:
"""simple docstring"""
for t, num_inference_steps in zip([1, 5, 10] , [10, 50, 1_00] ):
self.check_over_forward(num_inference_steps=a_ )
def _lowerCAmelCase ( self : Dict ) -> Optional[int]:
"""simple docstring"""
lowerCAmelCase__ : Union[str, Any] = 27
for scheduler_class in self.scheduler_classes:
lowerCAmelCase__ : List[str] = self.dummy_sample
lowerCAmelCase__ : Dict = 0.1 * sample
lowerCAmelCase__ : Tuple = self.get_scheduler_config()
lowerCAmelCase__ : List[str] = scheduler_class(**a_ )
scheduler.set_timesteps(a_ )
        # before the power-of-3 fix this would error on the first step, so exercising two steps is enough
for i, t in enumerate(scheduler.prk_timesteps[:2] ):
lowerCAmelCase__ : Optional[Any] = scheduler.step_prk(a_ , a_ , a_ ).prev_sample
def _lowerCAmelCase ( self : List[str] ) -> int:
"""simple docstring"""
with self.assertRaises(a_ ):
lowerCAmelCase__ : List[Any] = self.scheduler_classes[0]
lowerCAmelCase__ : Optional[int] = self.get_scheduler_config()
lowerCAmelCase__ : Tuple = scheduler_class(**a_ )
scheduler.step_plms(self.dummy_sample , 1 , self.dummy_sample ).prev_sample
def _lowerCAmelCase ( self : Union[str, Any] ) -> List[Any]:
"""simple docstring"""
lowerCAmelCase__ : str = self.full_loop()
lowerCAmelCase__ : Any = torch.sum(torch.abs(a_ ) )
lowerCAmelCase__ : Dict = torch.mean(torch.abs(a_ ) )
assert abs(result_sum.item() - 198.1318 ) < 1E-2
assert abs(result_mean.item() - 0.2580 ) < 1E-3
def _lowerCAmelCase ( self : Optional[Any] ) -> List[str]:
"""simple docstring"""
lowerCAmelCase__ : int = self.full_loop(prediction_type="""v_prediction""" )
lowerCAmelCase__ : str = torch.sum(torch.abs(a_ ) )
lowerCAmelCase__ : int = torch.mean(torch.abs(a_ ) )
assert abs(result_sum.item() - 67.3986 ) < 1E-2
assert abs(result_mean.item() - 0.0878 ) < 1E-3
def _lowerCAmelCase ( self : Tuple ) -> str:
"""simple docstring"""
lowerCAmelCase__ : int = self.full_loop(set_alpha_to_one=a_ , beta_start=0.01 )
lowerCAmelCase__ : Any = torch.sum(torch.abs(a_ ) )
lowerCAmelCase__ : int = torch.mean(torch.abs(a_ ) )
assert abs(result_sum.item() - 230.0399 ) < 1E-2
assert abs(result_mean.item() - 0.2995 ) < 1E-3
def _lowerCAmelCase ( self : Any ) -> Tuple:
"""simple docstring"""
lowerCAmelCase__ : Optional[Any] = self.full_loop(set_alpha_to_one=a_ , beta_start=0.01 )
lowerCAmelCase__ : Any = torch.sum(torch.abs(a_ ) )
lowerCAmelCase__ : List[str] = torch.mean(torch.abs(a_ ) )
assert abs(result_sum.item() - 186.9482 ) < 1E-2
assert abs(result_mean.item() - 0.2434 ) < 1E-3
| 242 |
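# The save/load checks above all share one idiom: run a scheduler step,
# round-trip the config through disk, rerun, and require the outputs to agree
# within 1e-5 summed absolute difference. The comparison itself is just:
import torch

output = torch.tensor([1.0, 2.0])
new_output = output + 1e-7  # tiny numerical drift
assert torch.sum(torch.abs(output - new_output)) < 1e-5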
"""simple docstring"""
from sklearn.metrics import recall_score
import datasets
SCREAMING_SNAKE_CASE : Dict = """
Recall is the fraction of the positive examples that were correctly labeled by the model as positive. It can be computed with the equation:
Recall = TP / (TP + FN)
Where TP is the true positives and FN is the false negatives.
"""
_KWARGS_DESCRIPTION = """
Args:
- **predictions** (`list` of `int`): The predicted labels.
- **references** (`list` of `int`): The ground truth labels.
- **labels** (`list` of `int`): The set of labels to include when `average` is not set to `binary`, and their order when average is `None`. Labels present in the data can be excluded in this input, for example to calculate a multiclass average ignoring a majority negative class, while labels not present in the data will result in 0 components in a macro average. For multilabel targets, labels are column indices. By default, all labels in y_true and y_pred are used in sorted order. Defaults to None.
- **pos_label** (`int`): The class label to use as the 'positive class' when calculating the recall. Defaults to `1`.
- **average** (`string`): This parameter is required for multiclass/multilabel targets. If None, the scores for each class are returned. Otherwise, this determines the type of averaging performed on the data. Defaults to `'binary'`.
- `'binary'`: Only report results for the class specified by `pos_label`. This is applicable only if the target labels and predictions are binary.
- `'micro'`: Calculate metrics globally by counting the total true positives, false negatives, and false positives.
- `'macro'`: Calculate metrics for each label, and find their unweighted mean. This does not take label imbalance into account.
- `'weighted'`: Calculate metrics for each label, and find their average weighted by support (the number of true instances for each label). This alters `'macro'` to account for label imbalance. Note that it can result in an F-score that is not between precision and recall.
- `'samples'`: Calculate metrics for each instance, and find their average (only meaningful for multilabel classification).
- **sample_weight** (`list` of `float`): Sample weights. Defaults to `None`.
- **zero_division** (`'warn'`, `0` or `1`): Sets the value to return when there is a zero division. Defaults to `'warn'`.
- `'warn'`: If there is a zero division, the return value is `0`, but warnings are also raised.
- `0`: If there is a zero division, the return value is `0`.
- `1`: If there is a zero division, the return value is `1`.
Returns:
- **recall** (`float`, or `array` of `float`): Either the general recall score, or the recall scores for individual classes, depending on the values input to `labels` and `average`. Minimum possible value is 0. Maximum possible value is 1. A higher recall means that more of the positive examples have been labeled correctly. Therefore, a higher recall is generally considered better.
Examples:
Example 1-A simple example with some errors
>>> recall_metric = datasets.load_metric('recall')
>>> results = recall_metric.compute(references=[0, 0, 1, 1, 1], predictions=[0, 1, 0, 1, 1])
>>> print(results)
{'recall': 0.6666666666666666}
Example 2-The same example as Example 1, but with `pos_label=0` instead of the default `pos_label=1`.
>>> recall_metric = datasets.load_metric('recall')
>>> results = recall_metric.compute(references=[0, 0, 1, 1, 1], predictions=[0, 1, 0, 1, 1], pos_label=0)
>>> print(results)
{'recall': 0.5}
Example 3-The same example as Example 1, but with `sample_weight` included.
>>> recall_metric = datasets.load_metric('recall')
>>> sample_weight = [0.9, 0.2, 0.9, 0.3, 0.8]
>>> results = recall_metric.compute(references=[0, 0, 1, 1, 1], predictions=[0, 1, 0, 1, 1], sample_weight=sample_weight)
>>> print(results)
{'recall': 0.55}
Example 4-A multiclass example, using different averages.
>>> recall_metric = datasets.load_metric('recall')
>>> predictions = [0, 2, 1, 0, 0, 1]
>>> references = [0, 1, 2, 0, 1, 2]
>>> results = recall_metric.compute(predictions=predictions, references=references, average='macro')
>>> print(results)
{'recall': 0.3333333333333333}
>>> results = recall_metric.compute(predictions=predictions, references=references, average='micro')
>>> print(results)
{'recall': 0.3333333333333333}
>>> results = recall_metric.compute(predictions=predictions, references=references, average='weighted')
>>> print(results)
{'recall': 0.3333333333333333}
>>> results = recall_metric.compute(predictions=predictions, references=references, average=None)
>>> print(results)
{'recall': array([1., 0., 0.])}
"""
_CITATION = """
@article{scikit-learn, title={Scikit-learn: Machine Learning in {P}ython}, author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V. and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P. and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.}, journal={Journal of Machine Learning Research}, volume={12}, pages={2825--2830}, year={2011}}
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION )
class Recall(datasets.Metric):
'''simple docstring'''
    def _info(self):
'''simple docstring'''
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'''predictions''': datasets.Sequence(datasets.Value('''int32''' ) ),
'''references''': datasets.Sequence(datasets.Value('''int32''' ) ),
}
if self.config_name == '''multilabel'''
else {
'''predictions''': datasets.Value('''int32''' ),
'''references''': datasets.Value('''int32''' ),
} ) , reference_urls=['''https://scikit-learn.org/stable/modules/generated/sklearn.metrics.recall_score.html'''] , )
    def _compute(self, predictions, references, labels=None, pos_label=1, average="binary", sample_weight=None, zero_division="warn"):
        score = recall_score(
            references, predictions, labels=labels, pos_label=pos_label, average=average, sample_weight=sample_weight, zero_division=zero_division,
        )
        return {"recall": float(score) if score.size == 1 else score}
| 102 | 0 |
'''simple docstring'''
import unittest
from transformers import BigBirdConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax
from transformers.models.big_bird.modeling_flax_big_bird import (
FlaxBigBirdForCausalLM,
FlaxBigBirdForMaskedLM,
FlaxBigBirdForMultipleChoice,
FlaxBigBirdForPreTraining,
FlaxBigBirdForQuestionAnswering,
FlaxBigBirdForSequenceClassification,
FlaxBigBirdForTokenClassification,
FlaxBigBirdModel,
)
class FlaxBigBirdModelTester(unittest.TestCase):
    def __init__(self, parent, batch_size=2, seq_length=56, is_training=True, use_attention_mask=True, use_token_type_ids=True, use_labels=True, vocab_size=99, hidden_size=32, num_hidden_layers=2, num_attention_heads=2, intermediate_size=7, hidden_act="gelu_new", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02, num_choices=4, attention_type="block_sparse", use_bias=True, rescale_embeddings=False, block_size=2, num_random_blocks=3):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_choices = num_choices
        self.rescale_embeddings = rescale_embeddings
        self.attention_type = attention_type
        self.use_bias = use_bias
        self.block_size = block_size
        self.num_random_blocks = num_random_blocks

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        attention_mask = None
        if self.use_attention_mask:
            attention_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        config = BigBirdConfig(
            vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, is_decoder=False, initializer_range=self.initializer_range, attention_type=self.attention_type, block_size=self.block_size, num_random_blocks=self.num_random_blocks, use_bias=self.use_bias, rescale_embeddings=self.rescale_embeddings,
        )

        return config, input_ids, token_type_ids, attention_mask

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, token_type_ids, attention_mask = config_and_inputs
        inputs_dict = {
            "input_ids": input_ids,
            "token_type_ids": token_type_ids,
            "attention_mask": attention_mask,
        }
        return config, inputs_dict


@require_flax
class FlaxBigBirdModelTest(FlaxModelTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            FlaxBigBirdForCausalLM,
            FlaxBigBirdModel,
            FlaxBigBirdForPreTraining,
            FlaxBigBirdForMaskedLM,
            FlaxBigBirdForMultipleChoice,
            FlaxBigBirdForQuestionAnswering,
            FlaxBigBirdForSequenceClassification,
            FlaxBigBirdForTokenClassification,
        )
        if is_flax_available()
        else ()
    )

    test_attn_probs = False
    test_mismatched_shapes = False

    def setUp(self):
        self.model_tester = FlaxBigBirdModelTester(self)

    @slow
    # copied from `test_modeling_flax_common` because it takes much longer than other models
    def test_from_pretrained_save_pretrained(self):
        super().test_from_pretrained_save_pretrained()

    @slow
    # copied from `test_modeling_flax_common` because it takes much longer than other models
    def test_from_pretrained_with_no_automatic_init(self):
        super().test_from_pretrained_with_no_automatic_init()

    @slow
    # copied from `test_modeling_flax_common` because it takes much longer than other models
    def test_no_automatic_init(self):
        super().test_no_automatic_init()

    @slow
    # copied from `test_modeling_flax_common` because it takes much longer than other models
    def test_hidden_states_output(self):
        super().test_hidden_states_output()

    @slow
    def test_model_from_pretrained(self):
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("google/bigbird-roberta-base")
            self.assertIsNotNone(model)

    def test_attention_outputs(self):
        if self.test_attn_probs:
            super().test_attention_outputs()

    @slow
    # copied from `test_modeling_flax_common` because it takes much longer than other models
    def test_jit_compilation(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__):
                prepared_inputs_dict = self._prepare_for_class(inputs_dict, model_class)
                model = model_class(config)

                @jax.jit
                def model_jitted(input_ids, attention_mask=None, **kwargs):
                    return model(input_ids=input_ids, attention_mask=attention_mask, **kwargs)

                with self.subTest("JIT Enabled"):
                    jitted_outputs = model_jitted(**prepared_inputs_dict).to_tuple()

                with self.subTest("JIT Disabled"):
                    with jax.disable_jit():
                        outputs = model_jitted(**prepared_inputs_dict).to_tuple()

                self.assertEqual(len(jitted_outputs), len(outputs))
                for jitted_output, output in zip(jitted_outputs, outputs):
                    self.assertEqual(jitted_output.shape, output.shape)

    def check_pt_flax_outputs(self, fx_outputs, pt_outputs, model_class, tol=1e-5, name="outputs", attributes=None):
        # `bigbird_block_sparse_attention` in `FlaxBigBird` returns `attention_probs = None`, while in PyTorch version,
        # an effort was done to return `attention_probs` (yet to be verified).
        if name.startswith("outputs.attentions"):
            return
        else:
            super().check_pt_flax_outputs(fx_outputs, pt_outputs, model_class, tol, name, attributes) | 364 |
'''simple docstring'''
from __future__ import annotations

from collections.abc import Iterable
from typing import Any


class Node:
    def __init__(self, value: Any = None) -> None:
        self.value = value
        self.parent: Node | None = None  # Added in order to delete a node easier
        self.left: Node | None = None
        self.right: Node | None = None

    def __repr__(self) -> str:
        from pprint import pformat

        if self.left is None and self.right is None:
            return str(self.value)
        return pformat({f"{self.value}": (self.left, self.right)}, indent=1)


class BinarySearchTree:
    def __init__(self, root: Node | None = None) -> None:
        self.root = root

    def __str__(self) -> str:
        return str(self.root)

    def __reassign_nodes(self, node: Node, new_children: Node | None) -> None:
        if new_children is not None:  # reset its kids
            new_children.parent = node.parent
        if node.parent is not None:  # reset its parent
            if self.is_right(node):  # If it is the right child
                node.parent.right = new_children
            else:
                node.parent.left = new_children
        else:
            self.root = new_children

    def is_right(self, node: Node) -> bool:
        if node.parent and node.parent.right:
            return node == node.parent.right
        return False

    def empty(self) -> bool:
        return self.root is None

    def __insert(self, value) -> None:
        new_node = Node(value)  # create a new Node
        if self.empty():  # if Tree is empty
            self.root = new_node  # set its root
        else:  # Tree is not empty
            parent_node = self.root  # from root
            if parent_node is None:
                return
            while True:  # While we don't get to a leaf
                if value < parent_node.value:  # We go left
                    if parent_node.left is None:
                        parent_node.left = new_node  # We insert the new node in a leaf
                        break
                    else:
                        parent_node = parent_node.left
                else:
                    if parent_node.right is None:
                        parent_node.right = new_node
                        break
                    else:
                        parent_node = parent_node.right
            new_node.parent = parent_node

    def insert(self, *values) -> None:
        for value in values:
            self.__insert(value)

    def search(self, value) -> Node | None:
        if self.empty():
            raise IndexError("Warning: Tree is empty! please use another.")
        else:
            node = self.root
            # use lazy evaluation here to avoid NoneType Attribute error
            while node is not None and node.value is not value:
                node = node.left if value < node.value else node.right
            return node

    def get_max(self, node: Node | None = None) -> Node | None:
        if node is None:
            if self.root is None:
                return None
            node = self.root
        if not self.empty():
            while node.right is not None:
                node = node.right
        return node

    def get_min(self, node: Node | None = None) -> Node | None:
        if node is None:
            node = self.root
            if self.root is None:
                return None
        if not self.empty():
            node = self.root
            while node.left is not None:
                node = node.left
        return node

    def remove(self, value) -> None:
        node = self.search(value)  # Look for the node with that label
        if node is not None:
            if node.left is None and node.right is None:  # If it has no children
                self.__reassign_nodes(node, None)
            elif node.left is None:  # Has only right children
                self.__reassign_nodes(node, node.right)
            elif node.right is None:  # Has only left children
                self.__reassign_nodes(node, node.left)
            else:
                tmp_node = self.get_max(
                    node.left
                )  # Gets the max value of the left branch
                self.remove(tmp_node.value)  # type: ignore
                node.value = (
                    tmp_node.value  # type: ignore
                )  # Assigns the value to the node to delete and keep tree structure

    def preorder_traverse(self, node: Node | None) -> Iterable:
        if node is not None:
            yield node  # Preorder Traversal
            yield from self.preorder_traverse(node.left)
            yield from self.preorder_traverse(node.right)

    def traversal_tree(self, traversal_function=None) -> Any:
        if traversal_function is None:
            return self.preorder_traverse(self.root)
        else:
            return traversal_function(self.root)

    def inorder(self, arr: list, node: Node | None) -> None:
        if node:
            self.inorder(arr, node.left)
            arr.append(node.value)
            self.inorder(arr, node.right)

    def find_kth_smallest(self, k: int, node: Node) -> int:
        arr: list = []
        self.inorder(arr, node)  # append all values to list using inorder traversal
        return arr[k - 1]


def postorder(curr_node: Node | None) -> list[Node]:
    node_list = []
    if curr_node is not None:
        node_list = postorder(curr_node.left) + postorder(curr_node.right) + [curr_node]
    return node_list


def binary_search_tree() -> None:
    testlist = (8, 3, 6, 1, 10, 14, 13, 4, 7)
    t = BinarySearchTree()
    for i in testlist:
        t.insert(i)

    # Prints all the elements of the list in order traversal
    print(t)

    if t.search(6) is not None:
        print("The value 6 exists")
    else:
        print("The value 6 doesn't exist")

    if t.search(-1) is not None:
        print("The value -1 exists")
    else:
        print("The value -1 doesn't exist")

    if not t.empty():
        print("Max Value: ", t.get_max().value)  # type: ignore
        print("Min Value: ", t.get_min().value)  # type: ignore

    for i in testlist:
        t.remove(i)
    print(t)
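# Small illustrative check of find_kth_smallest (not in the original module): an
# inorder traversal of a BST yields its values in sorted order, so the k-th
# smallest value is simply the k-th element of that traversal.
def _kth_smallest_demo() -> None:
    tree = BinarySearchTree()
    tree.insert(8, 3, 10, 1, 6)
    assert tree.find_kth_smallest(1, tree.root) == 1
    assert tree.find_kth_smallest(3, tree.root) == 6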
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True) | 228 | 0 |
def is_arithmetic_series(series: list) -> bool:
    if not isinstance(series, list):
        raise ValueError("Input series is not valid, valid series - [2, 4, 6]")
    if len(series) == 0:
        raise ValueError("Input list must be a non empty list")
    if len(series) == 1:
        return True
    common_diff = series[1] - series[0]
    for index in range(len(series) - 1):
        if series[index + 1] - series[index] != common_diff:
            return False
    return True


def arithmetic_mean(series: list) -> float:
    if not isinstance(series, list):
        raise ValueError("Input series is not valid, valid series - [2, 4, 6]")
    if len(series) == 0:
        raise ValueError("Input list must be a non empty list")
    answer = 0
    for val in series:
        answer += val
    return answer / len(series)
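# Quick usage check of the two helpers above (names as repaired here):
# [2, 4, 6] has a constant difference of 2, so it is arithmetic and its mean is 4.0.
assert is_arithmetic_series([2, 4, 6]) is True
assert is_arithmetic_series([2, 4, 7]) is False
assert arithmetic_mean([2, 4, 6]) == 4.0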
if __name__ == "__main__":
import doctest
doctest.testmod()
| 196 |
def logical_left_shift(number: int, shift_amount: int) -> str:
    if number < 0 or shift_amount < 0:
        raise ValueError("both inputs must be positive integers")

    binary_number = str(bin(number))
    binary_number += "0" * shift_amount
    return binary_number


def logical_right_shift(number: int, shift_amount: int) -> str:
    if number < 0 or shift_amount < 0:
        raise ValueError("both inputs must be positive integers")

    binary_number = str(bin(number))[2:]
    if shift_amount >= len(binary_number):
        return "0b0"
    shifted_binary_number = binary_number[: len(binary_number) - shift_amount]
    return "0b" + shifted_binary_number


def arithmetic_right_shift(number: int, shift_amount: int) -> str:
    if number >= 0:  # Get binary representation of positive number
        binary_number = "0" + str(bin(number)).strip("-")[2:]
    else:  # Get binary (2's complement) representation of negative number
        binary_number_length = len(bin(number)[3:])  # Find 2's complement of number
        binary_number = bin(abs(number) - (1 << binary_number_length))[3:]
        binary_number = (
            "1" + "0" * (binary_number_length - len(binary_number)) + binary_number
        )

    if shift_amount >= len(binary_number):
        return "0b" + binary_number[0] * len(binary_number)
    return (
        "0b"
        + binary_number[0] * shift_amount
        + binary_number[: len(binary_number) - shift_amount]
    )
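# Example behaviour of the three functions above; the values follow directly from
# the string-slicing logic: logical shifts treat the number as unsigned, while the
# arithmetic right shift replicates the sign bit of the 2's-complement form.
assert logical_left_shift(1, 1) == "0b10"
assert logical_right_shift(8, 1) == "0b100"
assert arithmetic_right_shift(-8, 1) == "0b11100"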
if __name__ == "__main__":
import doctest
doctest.testmod()
| 196 | 1 |
"""simple docstring"""
import itertools
import os
import re
_uppercase_uppercase_re = re.compile(r"([A-Z]+)([A-Z][a-z])")
_lowercase_uppercase_re = re.compile(r"([a-z\d])([A-Z])")

_single_underscore_re = re.compile(r"(?<!_)_(?!_)")
_multiple_underscores_re = re.compile(r"(_{2,})")

_split_re = r"^\w+(\.\w+)*$"

INVALID_WINDOWS_CHARACTERS_IN_PATH = r"<>:/\|?*"


def camelcase_to_snakecase(name):
    """Convert camel-case string to snake-case."""
    name = _uppercase_uppercase_re.sub(r"\1_\2", name)
    name = _lowercase_uppercase_re.sub(r"\1_\2", name)
    return name.lower()


def snakecase_to_camelcase(name):
    """Convert snake-case string to camel-case string."""
    name = _single_underscore_re.split(name)
    name = [_multiple_underscores_re.split(n) for n in name]
    return "".join(n.capitalize() for n in itertools.chain.from_iterable(name) if n != "")


def filename_prefix_for_name(name):
    if os.path.basename(name) != name:
        raise ValueError(f"Should be a dataset name, not a path: {name}")
    return camelcase_to_snakecase(name)


def filename_prefix_for_split(name, split):
    if os.path.basename(name) != name:
        raise ValueError(f"Should be a dataset name, not a path: {name}")
    if not re.match(_split_re, split):
        raise ValueError(f"Split name should match '{_split_re}'' but got '{split}'.")
    return f"{filename_prefix_for_name(name)}-{split}"


def filepattern_for_dataset_split(dataset_name, split, data_dir, filetype_suffix=None):
    prefix = filename_prefix_for_split(dataset_name, split)
    if filetype_suffix:
        prefix += f".{filetype_suffix}"
    filepath = os.path.join(data_dir, prefix)
    return f"{filepath}*"


def filenames_for_dataset_split(path, dataset_name, split, filetype_suffix=None, shard_lengths=None):
    prefix = filename_prefix_for_split(dataset_name, split)
    prefix = os.path.join(path, prefix)

    if shard_lengths:
        num_shards = len(shard_lengths)
        filenames = [f"{prefix}-{shard_id:05d}-of-{num_shards:05d}" for shard_id in range(num_shards)]
        if filetype_suffix:
            filenames = [filename + f".{filetype_suffix}" for filename in filenames]
        return filenames
    else:
        filename = prefix
        if filetype_suffix:
            filename += f".{filetype_suffix}"
        return [filename]
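# Example of how these helpers compose (illustrative values):
#   camelcase_to_snakecase("SquadV2")           -> "squad_v2"
#   filename_prefix_for_split("squad", "train") -> "squad-train"
#   filenames_for_dataset_split("/data", "squad", "train", filetype_suffix="arrow", shard_lengths=[100, 100])
#       -> ["/data/squad-train-00000-of-00002.arrow", "/data/squad-train-00001-of-00002.arrow"]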
| 365 |
"""simple docstring"""
class OverFlowError(Exception):
    pass


class UnderFlowError(Exception):
    pass


class FixedPriorityQueue:
    def __init__(self):
        self.queues = [
            [],
            [],
            [],
        ]

    def enqueue(self, priority: int, data: int) -> None:
        try:
            if len(self.queues[priority]) >= 100:
                raise OverFlowError("Maximum queue size is 100")
            self.queues[priority].append(data)
        except IndexError:
            raise ValueError("Valid priorities are 0, 1, and 2")

    def dequeue(self) -> int:
        for queue in self.queues:
            if queue:
                return queue.pop(0)
        raise UnderFlowError("All queues are empty")

    def __str__(self) -> str:
        return "\n".join(f"Priority {i}: {q}" for i, q in enumerate(self.queues))


class ElementPriorityQueue:
    def __init__(self):
        self.queue = []

    def enqueue(self, data: int) -> None:
        if len(self.queue) == 100:
            raise OverFlowError("Maximum queue size is 100")
        self.queue.append(data)

    def dequeue(self) -> int:
        if not self.queue:
            raise UnderFlowError("The queue is empty")
        else:
            data = min(self.queue)
            self.queue.remove(data)
            return data

    def __str__(self) -> str:
        return str(self.queue)


def fixed_priority_queue() -> None:
    fpq = FixedPriorityQueue()
    fpq.enqueue(0, 10)
    fpq.enqueue(1, 70)
    fpq.enqueue(0, 100)
    fpq.enqueue(2, 1)
    fpq.enqueue(2, 5)
    fpq.enqueue(1, 7)
    fpq.enqueue(2, 4)
    fpq.enqueue(1, 64)
    fpq.enqueue(0, 128)
    print(fpq)
    print(fpq.dequeue())
    print(fpq.dequeue())
    print(fpq.dequeue())
    print(fpq.dequeue())
    print(fpq.dequeue())
    print(fpq)
    print(fpq.dequeue())
    print(fpq.dequeue())
    print(fpq.dequeue())
    print(fpq.dequeue())
    print(fpq.dequeue())


def element_priority_queue() -> None:
    epq = ElementPriorityQueue()
    epq.enqueue(10)
    epq.enqueue(70)
    epq.enqueue(100)
    epq.enqueue(1)
    epq.enqueue(5)
    epq.enqueue(7)
    epq.enqueue(4)
    epq.enqueue(64)
    epq.enqueue(128)
    print(epq)
    print(epq.dequeue())
    print(epq.dequeue())
    print(epq.dequeue())
    print(epq.dequeue())
    print(epq.dequeue())
    print(epq)
    print(epq.dequeue())
    print(epq.dequeue())
    print(epq.dequeue())
    print(epq.dequeue())
    print(epq.dequeue())
if __name__ == "__main__":
fixed_priority_queue()
element_priority_queue()
| 57 | 0 |
import gc
import random
import tempfile
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMInverseScheduler,
DDIMScheduler,
DPMSolverMultistepInverseScheduler,
DPMSolverMultistepScheduler,
StableDiffusionDiffEditPipeline,
    UNet2DConditionModel,
)
from diffusers.utils import load_image, slow
from diffusers.utils.testing_utils import enable_full_determinism, floats_tensor, require_torch_gpu, torch_device
from ..pipeline_params import TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS, TEXT_GUIDED_IMAGE_INPAINTING_PARAMS
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class StableDiffusionDiffEditPipelineFastTests(PipelineLatentTesterMixin, PipelineTesterMixin, unittest.TestCase):
    pipeline_class = StableDiffusionDiffEditPipeline
    params = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS - {"height", "width", "image"} | {"image_latents"}
    batch_params = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS - {"image"} | {"image_latents"}
    image_params = frozenset(
        []
    )  # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess
    image_latents_params = frozenset([])

    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            block_out_channels=(32, 64), layers_per_block=2, sample_size=32, in_channels=4, out_channels=4, down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"), cross_attention_dim=32, attention_head_dim=(2, 4), use_linear_projection=True,
        )
        scheduler = DDIMScheduler(
            beta_start=0.00085, beta_end=0.012, beta_schedule="scaled_linear", clip_sample=False, set_alpha_to_one=False,
        )
        inverse_scheduler = DDIMInverseScheduler(
            beta_start=0.00085, beta_end=0.012, beta_schedule="scaled_linear", clip_sample=False, set_alpha_to_zero=False,
        )
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64], in_channels=3, out_channels=3, down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"], up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"], latent_channels=4, sample_size=128,
        )
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000, hidden_act="gelu", projection_dim=512,
        )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        components = {
            "unet": unet,
            "scheduler": scheduler,
            "inverse_scheduler": inverse_scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "safety_checker": None,
            "feature_extractor": None,
        }
        return components

    def get_dummy_inputs(self, device, seed=0):
        mask = floats_tensor((1, 16, 16), rng=random.Random(seed)).to(device)
        latents = floats_tensor((1, 2, 4, 16, 16), rng=random.Random(seed)).to(device)
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "a dog and a newt",
            "mask_image": mask,
            "image_latents": latents,
            "generator": generator,
            "num_inference_steps": 2,
            "inpaint_strength": 1.0,
            "guidance_scale": 6.0,
            "output_type": "numpy",
        }
        return inputs

    def get_dummy_mask_inputs(self, device, seed=0):
        image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)
        image = image.cpu().permute(0, 2, 3, 1)[0]
        image = Image.fromarray(np.uint8(image)).convert("RGB")
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "image": image,
            "source_prompt": "a cat and a frog",
            "target_prompt": "a dog and a newt",
            "generator": generator,
            "num_inference_steps": 2,
            "num_maps_per_mask": 2,
            "mask_encode_strength": 1.0,
            "guidance_scale": 6.0,
            "output_type": "numpy",
        }
        return inputs

    def get_dummy_inversion_inputs(self, device, seed=0):
        image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)
        image = image.cpu().permute(0, 2, 3, 1)[0]
        image = Image.fromarray(np.uint8(image)).convert("RGB")
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "image": image,
            "prompt": "a cat and a frog",
            "generator": generator,
            "num_inference_steps": 2,
            "inpaint_strength": 1.0,
            "guidance_scale": 6.0,
            "decode_latents": True,
            "output_type": "numpy",
        }
        return inputs

    def test_save_load_optional_components(self):
        if not hasattr(self.pipeline_class, "_optional_components"):
            return
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        # set all optional components to None and update pipeline config accordingly
        for optional_component in pipe._optional_components:
            setattr(pipe, optional_component, None)
        pipe.register_modules(**{optional_component: None for optional_component in pipe._optional_components})

        inputs = self.get_dummy_inputs(torch_device)
        output = pipe(**inputs)[0]

        with tempfile.TemporaryDirectory() as tmpdir:
            pipe.save_pretrained(tmpdir)
            pipe_loaded = self.pipeline_class.from_pretrained(tmpdir)
            pipe_loaded.to(torch_device)
            pipe_loaded.set_progress_bar_config(disable=None)

        for optional_component in pipe._optional_components:
            self.assertTrue(
                getattr(pipe_loaded, optional_component) is None, f"`{optional_component}` did not stay set to None after loading.", )

        inputs = self.get_dummy_inputs(torch_device)
        output_loaded = pipe_loaded(**inputs)[0]

        max_diff = np.abs(output - output_loaded).max()
        self.assertLess(max_diff, 1e-4)

    def test_mask(self):
        device = "cpu"
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_mask_inputs(device)
        mask = pipe.generate_mask(**inputs)
        mask_slice = mask[0, -3:, -3:]

        self.assertEqual(mask.shape, (1, 16, 16))
        expected_slice = np.array([0] * 9)
        max_diff = np.abs(mask_slice.flatten() - expected_slice).max()
        self.assertLessEqual(max_diff, 1e-3)
        self.assertEqual(mask[0, -3, -4], 0)

    def test_inversion(self):
        device = "cpu"
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inversion_inputs(device)
        image = pipe.invert(**inputs).images
        image_slice = image[0, -1, -3:, -3:]

        self.assertEqual(image.shape, (2, 32, 32, 3))
        expected_slice = np.array(
            [0.5150, 0.5134, 0.5043, 0.5376, 0.4694, 0.51050, 0.5015, 0.4407, 0.4799], )
        max_diff = np.abs(image_slice.flatten() - expected_slice).max()
        self.assertLessEqual(max_diff, 1e-3)

    def test_inference_batch_single_identical(self):
        super().test_inference_batch_single_identical(expected_max_diff=5e-3)

    def test_inversion_dpm(self):
        device = "cpu"
        components = self.get_dummy_components()

        scheduler_args = {"beta_start": 0.00085, "beta_end": 0.012, "beta_schedule": "scaled_linear"}
        components["scheduler"] = DPMSolverMultistepScheduler(**scheduler_args)
        components["inverse_scheduler"] = DPMSolverMultistepInverseScheduler(**scheduler_args)

        pipe = self.pipeline_class(**components)
        pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inversion_inputs(device)
        image = pipe.invert(**inputs).images
        image_slice = image[0, -1, -3:, -3:]

        self.assertEqual(image.shape, (2, 32, 32, 3))
        expected_slice = np.array(
            [0.5150, 0.5134, 0.5043, 0.5376, 0.4694, 0.51050, 0.5015, 0.4407, 0.4799], )
        max_diff = np.abs(image_slice.flatten() - expected_slice).max()
        self.assertLessEqual(max_diff, 1e-3)


@require_torch_gpu
@slow
class StableDiffusionDiffEditPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    @classmethod
    def setUpClass(cls):
        raw_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/diffedit/fruit.png")
        raw_image = raw_image.convert("RGB").resize((768, 768))
        cls.raw_image = raw_image

    def test_stable_diffusion_diffedit_full(self):
        generator = torch.manual_seed(0)

        pipe = StableDiffusionDiffEditPipeline.from_pretrained(
            "stabilityai/stable-diffusion-2-1", safety_checker=None, torch_dtype=torch.float16)
        pipe.scheduler = DDIMScheduler.from_config(pipe.scheduler.config)
        pipe.inverse_scheduler = DDIMInverseScheduler.from_config(pipe.scheduler.config)
        pipe.enable_model_cpu_offload()
        pipe.set_progress_bar_config(disable=None)

        source_prompt = "a bowl of fruit"
        target_prompt = "a bowl of pears"

        mask_image = pipe.generate_mask(
            image=self.raw_image, source_prompt=source_prompt, target_prompt=target_prompt, generator=generator, )

        inv_latents = pipe.invert(
            prompt=source_prompt, image=self.raw_image, inpaint_strength=0.7, generator=generator).latents

        image = pipe(
            prompt=target_prompt, mask_image=mask_image, image_latents=inv_latents, generator=generator, negative_prompt=source_prompt, inpaint_strength=0.7, output_type="numpy", ).images[0]

        expected_image = (
            np.array(
                load_image(
                    "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
                    "/diffedit/pears.png").resize((768, 768)))
            / 255
        )
        assert np.abs((expected_image - image).max()) < 5e-1

    def test_stable_diffusion_diffedit_dpm(self):
        generator = torch.manual_seed(0)

        pipe = StableDiffusionDiffEditPipeline.from_pretrained(
            "stabilityai/stable-diffusion-2-1", safety_checker=None, torch_dtype=torch.float16)
        pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
        pipe.inverse_scheduler = DPMSolverMultistepInverseScheduler.from_config(pipe.scheduler.config)
        pipe.enable_model_cpu_offload()
        pipe.set_progress_bar_config(disable=None)

        source_prompt = "a bowl of fruit"
        target_prompt = "a bowl of pears"

        mask_image = pipe.generate_mask(
            image=self.raw_image, source_prompt=source_prompt, target_prompt=target_prompt, generator=generator, )

        inv_latents = pipe.invert(
            prompt=source_prompt, image=self.raw_image, inpaint_strength=0.7, generator=generator, num_inference_steps=25, ).latents

        image = pipe(
            prompt=target_prompt, mask_image=mask_image, image_latents=inv_latents, generator=generator, negative_prompt=source_prompt, inpaint_strength=0.7, num_inference_steps=25, output_type="numpy", ).images[0]

        expected_image = (
            np.array(
                load_image(
                    "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
                    "/diffedit/pears.png").resize((768, 768)))
            / 255
        )
        assert np.abs((expected_image - image).max()) < 5e-1
| 339 | '''simple docstring'''
import os
from argparse import ArgumentParser
from typing import List
import torch.utils.data
from datasets import Dataset, IterableDataset
from datasets.distributed import split_dataset_by_node
NUM_SHARDS = 4
NUM_ITEMS_PER_SHARD = 3


class FailedTestError(RuntimeError):
    pass


def gen(shards):
    for shard in shards:
        for i in range(NUM_ITEMS_PER_SHARD):
            yield {"i": i, "shard": shard}


def main():
    rank = int(os.environ["RANK"])
    world_size = int(os.environ["WORLD_SIZE"])

    parser = ArgumentParser()
    parser.add_argument("--streaming", type=bool)
    parser.add_argument("--local_rank", type=int)
    parser.add_argument("--num_workers", type=int, default=0)
    args = parser.parse_args()
    streaming = args.streaming
    num_workers = args.num_workers

    gen_kwargs = {"shards": [f"shard_{shard_idx}" for shard_idx in range(NUM_SHARDS)]}
    ds = IterableDataset.from_generator(gen, gen_kwargs=gen_kwargs)
    if not streaming:
        ds = Dataset.from_list(list(ds))

    ds = split_dataset_by_node(ds, rank=rank, world_size=world_size)
    dataloader = torch.utils.data.DataLoader(ds, num_workers=num_workers)

    full_size = NUM_SHARDS * NUM_ITEMS_PER_SHARD
    expected_local_size = full_size // world_size
    expected_local_size += int(rank < (full_size % world_size))

    local_size = sum(1 for _ in dataloader)
    if local_size != expected_local_size:
        raise FailedTestError(f"local_size {local_size} != expected_local_size {expected_local_size}")
if __name__ == "__main__":
main()
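# Usage note: this script is meant to be launched once per process by a distributed
# launcher that sets RANK and WORLD_SIZE, e.g. (illustrative command, exact flags
# depend on your setup):
#   torchrun --nproc_per_node=2 this_script.py --streaming True --num_workers 2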
| 198 | 0 |
'''simple docstring'''
import warnings
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class AltCLIPProcessor(ProcessorMixin):
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "CLIPImageProcessor"
    tokenizer_class = ("XLMRobertaTokenizer", "XLMRobertaTokenizerFast")

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.", FutureWarning, )
            feature_extractor = kwargs.pop("feature_extractor")

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")

        super().__init__(image_processor, tokenizer)

    def __call__(self, text=None, images=None, return_tensors=None, **kwargs):
        if text is None and images is None:
            raise ValueError("You have to specify either text or images. Both cannot be none.")

        if text is not None:
            encoding = self.tokenizer(text, return_tensors=return_tensors, **kwargs)

        if images is not None:
            image_features = self.image_processor(images, return_tensors=return_tensors, **kwargs)

        if text is not None and images is not None:
            encoding["pixel_values"] = image_features.pixel_values
            return encoding
        elif text is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features), tensor_type=return_tensors)

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))
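# Minimal usage sketch for the processor above (the checkpoint id and file name are
# illustrative, not taken from this module):
#
#   from PIL import Image
#   processor = AltCLIPProcessor.from_pretrained("BAAI/AltCLIP")
#   inputs = processor(text=["a photo of a cat"], images=Image.open("cat.png"), return_tensors="pt")
#   # `inputs` now carries input_ids / attention_mask from the tokenizer plus pixel_values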
| 353 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
    "configuration_roc_bert": ["ROC_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP", "RoCBertConfig"],
    "tokenization_roc_bert": ["RoCBertTokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
pass
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_roc_bert"] = [
'ROC_BERT_PRETRAINED_MODEL_ARCHIVE_LIST',
'RoCBertForCausalLM',
'RoCBertForMaskedLM',
'RoCBertForMultipleChoice',
'RoCBertForPreTraining',
'RoCBertForQuestionAnswering',
'RoCBertForSequenceClassification',
'RoCBertForTokenClassification',
'RoCBertLayer',
'RoCBertModel',
'RoCBertPreTrainedModel',
'load_tf_weights_in_roc_bert',
]
if TYPE_CHECKING:
from .configuration_roc_bert import ROC_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, RoCBertConfig
from .tokenization_roc_bert import RoCBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        pass  # nothing extra to import when `tokenizers` is available
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_roc_bert import (
ROC_BERT_PRETRAINED_MODEL_ARCHIVE_LIST,
RoCBertForCausalLM,
RoCBertForMaskedLM,
RoCBertForMultipleChoice,
RoCBertForPreTraining,
RoCBertForQuestionAnswering,
RoCBertForSequenceClassification,
RoCBertForTokenClassification,
RoCBertLayer,
RoCBertModel,
RoCBertPreTrainedModel,
load_tf_weights_in_roc_bert,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 294 | 0 |
from __future__ import annotations
import collections
import pprint
from pathlib import Path
def signature(word: str) -> str:
    """Return a word sorted."""
    return "".join(sorted(word))


def anagram(my_word: str) -> list[str]:
    """Return every anagram of the given word."""
    return word_by_signature[signature(my_word)]
data = Path(__file__).parent.joinpath("words.txt").read_text(encoding="utf-8")
word_list = sorted({word.strip().lower() for word in data.splitlines()})

word_by_signature = collections.defaultdict(list)
for word in word_list:
word_by_signature[signature(word)].append(word)
if __name__ == "__main__":
    all_anagrams = {word: anagram(word) for word in word_list if len(anagram(word)) > 1}
with open("""anagrams.txt""", """w""") as file:
file.write("""all_anagrams = \n """)
file.write(pprint.pformat(all_anagrams))
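# Example of the two helpers above: "listen" and "silent" share the signature
# "eilnst", so they fall into the same bucket and count as anagrams of each other.
assert signature("listen") == signature("silent") == "eilnst"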
| 13 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
_import_structure = {
    "configuration_tapas": ["TAPAS_PRETRAINED_CONFIG_ARCHIVE_MAP", "TapasConfig"],
    "tokenization_tapas": ["TapasTokenizer"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tapas"] = [
"TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST",
"TapasForMaskedLM",
"TapasForQuestionAnswering",
"TapasForSequenceClassification",
"TapasModel",
"TapasPreTrainedModel",
"load_tf_weights_in_tapas",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_tapas"] = [
"TF_TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFTapasForMaskedLM",
"TFTapasForQuestionAnswering",
"TFTapasForSequenceClassification",
"TFTapasModel",
"TFTapasPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_tapas import TAPAS_PRETRAINED_CONFIG_ARCHIVE_MAP, TapasConfig
from .tokenization_tapas import TapasTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tapas import (
TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST,
TapasForMaskedLM,
TapasForQuestionAnswering,
TapasForSequenceClassification,
TapasModel,
TapasPreTrainedModel,
load_tf_weights_in_tapas,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_tapas import (
TF_TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST,
TFTapasForMaskedLM,
TFTapasForQuestionAnswering,
TFTapasForSequenceClassification,
TFTapasModel,
TFTapasPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 238 | 0 |
'''simple docstring'''
from pathlib import Path
import fire
from tqdm import tqdm
def download_wmt_dataset(src_lang="ro", tgt_lang="en", dataset="wmt16", save_dir=None) -> None:
    """Download a dataset using the datasets package and save it to the src/tgt text-file format."""
    try:
        import datasets
    except (ModuleNotFoundError, ImportError):
        raise ImportError("run pip install datasets")
    pair = f"{src_lang}-{tgt_lang}"
    print(f"Converting {dataset}-{pair}")
    ds = datasets.load_dataset(dataset, pair)
    if save_dir is None:
        save_dir = f"{dataset}-{pair}"
    save_dir = Path(save_dir)
    save_dir.mkdir(exist_ok=True)

    for split in ds.keys():
        print(f"Splitting {split} with {ds[split].num_rows} records")

        # to save to val.source, val.target like summary datasets
        fn = "val" if split == "validation" else split

        src_path = save_dir.joinpath(f"{fn}.source")
        tgt_path = save_dir.joinpath(f"{fn}.target")
        src_fp = src_path.open("w+")
        tgt_fp = tgt_path.open("w+")

        # reader is the bottleneck so writing one record at a time doesn't slow things down
        for x in tqdm(ds[split]):
            ex = x["translation"]
            src_fp.write(ex[src_lang] + "\n")
            tgt_fp.write(ex[tgt_lang] + "\n")

    print(f"Saved {dataset} dataset to {save_dir}")
if __name__ == "__main__":
fire.Fire(download_wmt_dataset)
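# Example invocation via python-fire (the script filename is illustrative; the flags
# map onto the keyword arguments of download_wmt_dataset above):
#   python download_wmt.py --src_lang ro --tgt_lang en --dataset wmt16 --save_dir wmt16-ro-en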
| 368 |
'''simple docstring'''
import unittest
from transformers import (
MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING,
TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING,
    Text2TextGenerationPipeline,
pipeline,
)
from transformers.testing_utils import is_pipeline_test, require_tf, require_torch
from transformers.utils import is_torch_available
from .test_pipelines_common import ANY
if is_torch_available():
import torch
@is_pipeline_test
class Text2TextGenerationPipelineTests(unittest.TestCase):
    model_mapping = MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
    tf_model_mapping = TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING

    def get_test_pipeline(self, model, tokenizer, processor):
        generator = Text2TextGenerationPipeline(model=model, tokenizer=tokenizer)
        return generator, ["Something to write", "Something else"]

    def run_pipeline_test(self, generator, _):
        outputs = generator("Something there")
        self.assertEqual(outputs, [{"generated_text": ANY(str)}])
        # These are encoder decoder, they don't just append to incoming string
        self.assertFalse(outputs[0]["generated_text"].startswith("Something there"))

        outputs = generator(["This is great !", "Something else"], num_return_sequences=2, do_sample=True)
        self.assertEqual(
            outputs, [
                [{"generated_text": ANY(str)}, {"generated_text": ANY(str)}],
                [{"generated_text": ANY(str)}, {"generated_text": ANY(str)}],
            ], )

        outputs = generator(
            ["This is great !", "Something else"], num_return_sequences=2, batch_size=2, do_sample=True)
        self.assertEqual(
            outputs, [
                [{"generated_text": ANY(str)}, {"generated_text": ANY(str)}],
                [{"generated_text": ANY(str)}, {"generated_text": ANY(str)}],
            ], )

        with self.assertRaises(ValueError):
            generator(4)

    @require_torch
    def test_small_model_pt(self):
        generator = pipeline("text2text-generation", model="patrickvonplaten/t5-tiny-random", framework="pt")
        # do_sample=False necessary for reproducibility
        outputs = generator("Something there", do_sample=False)
        self.assertEqual(outputs, [{"generated_text": ""}])

        num_return_sequences = 3
        outputs = generator(
            "Something there", num_return_sequences=num_return_sequences, num_beams=num_return_sequences, )
        target_outputs = [
            {"generated_text": "Beide Beide Beide Beide Beide Beide Beide Beide Beide"},
            {"generated_text": "Beide Beide Beide Beide Beide Beide Beide Beide"},
            {"generated_text": ""},
        ]
        self.assertEqual(outputs, target_outputs)

        outputs = generator("This is a test", do_sample=True, num_return_sequences=2, return_tensors=True)
        self.assertEqual(
            outputs, [
                {"generated_token_ids": ANY(torch.Tensor)},
                {"generated_token_ids": ANY(torch.Tensor)},
            ], )

        generator.tokenizer.pad_token_id = generator.model.config.eos_token_id
        generator.tokenizer.pad_token = "<pad>"
        outputs = generator(
            ["This is a test", "This is a second test"], do_sample=True, num_return_sequences=2, batch_size=2, return_tensors=True, )
        self.assertEqual(
            outputs, [
                [
                    {"generated_token_ids": ANY(torch.Tensor)},
                    {"generated_token_ids": ANY(torch.Tensor)},
                ],
                [
                    {"generated_token_ids": ANY(torch.Tensor)},
                    {"generated_token_ids": ANY(torch.Tensor)},
                ],
            ], )

    @require_tf
    def test_small_model_tf(self):
        generator = pipeline("text2text-generation", model="patrickvonplaten/t5-tiny-random", framework="tf")
        # do_sample=False necessary for reproducibility
        outputs = generator("Something there", do_sample=False)
        self.assertEqual(outputs, [{"generated_text": ""}])
| 249 | 0 |
"""simple docstring"""
from sympy import diff, lambdify, symbols
from sympy.functions import * # noqa: F403
def newton_raphson(function: str, starting_point: complex, variable: str = "x", precision: float = 10**-10, multiplicity: int = 1, ) -> complex:
    """Find the root of `function` from `starting_point` by the Newton-Raphson method."""
    x = symbols(variable)
    func = lambdify(x, function)
    diff_function = lambdify(x, diff(function, x))

    prev_guess = starting_point
    while True:
        if diff_function(prev_guess) != 0:
            next_guess = prev_guess - multiplicity * func(prev_guess) / diff_function(
                prev_guess)
        else:
            raise ZeroDivisionError("Could not find root") from None

        # Precision is checked by comparing the difference of consecutive guesses
        if abs(next_guess - prev_guess) < precision:
            return next_guess

        prev_guess = next_guess
# Let's Execute
if __name__ == "__main__":
# Find root of trigonometric function
# Find value of pi
print(f'The root of sin(x) = 0 is {newton_raphson("sin(x)", 2)}')
# Find root of polynomial
# Find fourth Root of 5
print(f'The root of x**4 - 5 = 0 is {newton_raphson("x**4 -5", 0.4 +5J)}')
# Find value of e
print(
"The root of log(y) - 1 = 0 is ",
f'{newton_raphson("log(y) - 1", 2, variable="y")}',
)
# Exponential Roots
print(
"The root of exp(x) - 1 = 0 is",
f'{newton_raphson("exp(x) - 1", 1_0, precision=0.005)}',
)
# Find root of cos(x)
print(f'The root of cos(x) = 0 is {newton_raphson("cos(x)", 0)}') | 191 |
"""simple docstring"""
import argparse
import pickle
import numpy as np
import torch
from torch import nn
from transformers import ReformerConfig, ReformerModelWithLMHead
from transformers.utils import logging
logging.set_verbosity_info()
def __lowerCamelCase ( a_ : Union[str, Any] , a_ : Tuple , a_ : str=None ) -> Union[str, Any]:
# set parameter of one layer
assert torch_layer.weight.shape == weight.shape, f'''{torch_layer} layer.weight does not match'''
__SCREAMING_SNAKE_CASE :Dict = nn.Parameter(a_ )
if bias is not None:
assert torch_layer.bias.shape == bias.shape, f'''{torch_layer} layer.bias does not match'''
__SCREAMING_SNAKE_CASE :Optional[int] = nn.Parameter(a_ )
def __lowerCamelCase ( a_ : Dict , a_ : str , a_ : Optional[int] ) -> Any:
# set torch weights for 1-to-1 comparison
__SCREAMING_SNAKE_CASE :List[Any] = np.asarray(weights[0] )
__SCREAMING_SNAKE_CASE :Optional[int] = np.asarray(weights[1] )
__SCREAMING_SNAKE_CASE :Optional[int] = np.asarray(weights[2] )
set_param(
torch_layer.self_attention.query_key , torch.tensor(a_ ).transpose(1 , 2 ).contiguous().view(-1 , a_ ) , )
set_param(
torch_layer.self_attention.value , torch.tensor(a_ ).transpose(1 , 2 ).contiguous().view(-1 , a_ ) , )
set_param(
torch_layer.output.dense , torch.tensor(a_ ).view(-1 , a_ ).contiguous().transpose(0 , 1 ) , )
def __lowerCamelCase ( a_ : List[Any] , a_ : Dict , a_ : List[str] ) -> Union[str, Any]:
# set torch weights for 1-to-1 comparison
__SCREAMING_SNAKE_CASE :Union[str, Any] = np.asarray(weights[0] )
__SCREAMING_SNAKE_CASE :Union[str, Any] = np.asarray(weights[1] )
__SCREAMING_SNAKE_CASE :Any = np.asarray(weights[2] )
__SCREAMING_SNAKE_CASE :Dict = np.asarray(weights[3] )
set_param(
torch_layer.self_attention.query , torch.tensor(a_ ).transpose(1 , 2 ).contiguous().view(-1 , a_ ) , )
set_param(
torch_layer.self_attention.key , torch.tensor(a_ ).transpose(1 , 2 ).contiguous().view(-1 , a_ ) , )
set_param(
torch_layer.self_attention.value , torch.tensor(a_ ).transpose(1 , 2 ).contiguous().view(-1 , a_ ) , )
set_param(
torch_layer.output.dense , torch.tensor(a_ ).view(-1 , a_ ).contiguous().transpose(0 , 1 ) , )
def __lowerCamelCase ( a_ : Any , a_ : List[str] , a_ : Optional[int] ) -> Union[str, Any]:
# layernorm 1
__SCREAMING_SNAKE_CASE :Any = weights[0][0][0]
__SCREAMING_SNAKE_CASE :Union[str, Any] = np.asarray(layer_norm_a[0] )
__SCREAMING_SNAKE_CASE :Union[str, Any] = np.asarray(layer_norm_a[1] )
set_param(
torch_block.attention.layer_norm , torch.tensor(a_ ) , torch.tensor(a_ ) , )
# lsh weights + output
__SCREAMING_SNAKE_CASE :List[Any] = weights[0][1]
if len(a_ ) < 4:
set_layer_weights_in_torch_lsh(a_ , torch_block.attention , a_ )
else:
set_layer_weights_in_torch_local(a_ , torch_block.attention , a_ )
# intermediate weighs
__SCREAMING_SNAKE_CASE :List[Any] = weights[2][0][1][2]
# Chunked Feed Forward
if len(a_ ) == 4:
__SCREAMING_SNAKE_CASE :List[str] = intermediate_weights[2]
# layernorm 2
__SCREAMING_SNAKE_CASE :Tuple = np.asarray(intermediate_weights[0][0] )
__SCREAMING_SNAKE_CASE :Union[str, Any] = np.asarray(intermediate_weights[0][1] )
set_param(
torch_block.feed_forward.layer_norm , torch.tensor(a_ ) , torch.tensor(a_ ) , )
# intermediate dense
__SCREAMING_SNAKE_CASE :int = np.asarray(intermediate_weights[1][0] )
__SCREAMING_SNAKE_CASE :int = np.asarray(intermediate_weights[1][1] )
set_param(
torch_block.feed_forward.dense.dense , torch.tensor(a_ ).transpose(0 , 1 ).contiguous() , torch.tensor(a_ ) , )
# intermediate out
__SCREAMING_SNAKE_CASE :str = np.asarray(intermediate_weights[4][0] )
__SCREAMING_SNAKE_CASE :str = np.asarray(intermediate_weights[4][1] )
set_param(
torch_block.feed_forward.output.dense , torch.tensor(a_ ).transpose(0 , 1 ).contiguous() , torch.tensor(a_ ) , )
def __lowerCamelCase ( a_ : List[str] , a_ : str , a_ : List[Any] ) -> Optional[Any]:
# reformer model
__SCREAMING_SNAKE_CASE :Dict = torch_model.reformer
# word embeds
__SCREAMING_SNAKE_CASE :List[Any] = np.asarray(weights[1] )
set_param(
torch_model_reformer.embeddings.word_embeddings , torch.tensor(a_ ) , )
if isinstance(weights[3] , a_ ):
__SCREAMING_SNAKE_CASE :List[Any] = torch_model_reformer.embeddings.position_embeddings
for emb_idx in range(len(position_embeddings.weights ) ):
__SCREAMING_SNAKE_CASE :List[str] = np.asarray(weights[3][emb_idx][0] )
assert (
position_embeddings.weights[emb_idx].shape == emb_weights.shape
            ), f'''{position_embeddings.weights[emb_idx]} emb does not match'''
__SCREAMING_SNAKE_CASE :str = nn.Parameter(torch.tensor(a_ ) )
__SCREAMING_SNAKE_CASE :Optional[int] = weights[5]
assert len(torch_model_reformer.encoder.layers ) * 4 == len(
a_ ), "HF and trax model do not have the same number of layers"
for layer_idx, layer in enumerate(torch_model_reformer.encoder.layers ):
__SCREAMING_SNAKE_CASE :Dict = trax_layer_weights[4 * layer_idx : 4 * (layer_idx + 1)]
set_block_weights_in_torch(a_ , a_ , a_ )
# output layer norm
__SCREAMING_SNAKE_CASE :Optional[int] = np.asarray(weights[7][0] )
__SCREAMING_SNAKE_CASE :List[Any] = np.asarray(weights[7][1] )
set_param(
torch_model_reformer.encoder.layer_norm , torch.tensor(a_ ) , torch.tensor(a_ ) , )
# output embeddings
__SCREAMING_SNAKE_CASE :Optional[int] = np.asarray(weights[9][0] )
__SCREAMING_SNAKE_CASE :str = np.asarray(weights[9][1] )
set_param(
torch_model.lm_head.decoder , torch.tensor(a_ ).transpose(0 , 1 ).contiguous() , torch.tensor(a_ ) , )
def __lowerCamelCase ( a_ : Any , a_ : Dict , a_ : Dict ) -> Tuple:
# Initialise PyTorch model
__SCREAMING_SNAKE_CASE :List[str] = ReformerConfig.from_json_file(a_ )
print(f'''Building PyTorch model from configuration: {config}''' )
__SCREAMING_SNAKE_CASE :List[Any] = ReformerModelWithLMHead(a_ )
with open(a_ , '''rb''' ) as f:
__SCREAMING_SNAKE_CASE :Any = pickle.load(a_ )['''weights''']
set_model_weights_in_torch(a_ , a_ , config.hidden_size )
# Save pytorch-model
print(f'''Save PyTorch model to {pytorch_dump_path}''' )
torch.save(model.state_dict() , a_ )
if __name__ == "__main__":
lowerCamelCase_ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--trax_model_pkl_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
)
parser.add_argument(
"--config_file",
default=None,
type=str,
required=True,
help=(
"The config json file corresponding to the pre-trained Reformer model. \n"
"This specifies the model architecture."
),
)
parser.add_argument(
"--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
lowerCamelCase_ = parser.parse_args()
convert_trax_checkpoint_to_pytorch(args.trax_model_pkl_path, args.config_file, args.pytorch_dump_path) | 191 | 1 |
"""simple docstring"""
import copy
import inspect
import unittest
import numpy as np
from huggingface_hub import hf_hub_download
from transformers import VideoMAEConfig
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import (
MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING,
VideoMAEForPreTraining,
VideoMAEForVideoClassification,
VideoMAEModel,
)
from transformers.models.videomae.modeling_videomae import VIDEOMAE_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from transformers import VideoMAEImageProcessor
class __snake_case :
"""simple docstring"""
def __init__( self , __lowerCamelCase , __lowerCamelCase=13 , __lowerCamelCase=10 , __lowerCamelCase=3 , __lowerCamelCase=2 , __lowerCamelCase=2 , __lowerCamelCase=2 , __lowerCamelCase=True , __lowerCamelCase=True , __lowerCamelCase=32 , __lowerCamelCase=5 , __lowerCamelCase=4 , __lowerCamelCase=37 , __lowerCamelCase="gelu" , __lowerCamelCase=0.1 , __lowerCamelCase=0.1 , __lowerCamelCase=10 , __lowerCamelCase=0.0_2 , __lowerCamelCase=0.9 , __lowerCamelCase=None , ):
'''simple docstring'''
__A : Union[str, Any] = parent
__A : str = batch_size
__A : int = image_size
__A : str = num_channels
__A : List[Any] = patch_size
__A : Any = tubelet_size
__A : int = num_frames
__A : List[str] = is_training
__A : Union[str, Any] = use_labels
__A : Optional[int] = hidden_size
__A : Optional[Any] = num_hidden_layers
__A : Optional[int] = num_attention_heads
__A : Any = intermediate_size
__A : str = hidden_act
__A : Any = hidden_dropout_prob
__A : Union[str, Any] = attention_probs_dropout_prob
__A : List[str] = type_sequence_label_size
__A : Union[str, Any] = initializer_range
__A : Union[str, Any] = mask_ratio
__A : Any = scope
# in VideoMAE, the number of tokens equals num_frames/tubelet_size * num_patches per frame
__A : Optional[int] = (image_size // patch_size) ** 2
__A : List[str] = (num_frames // tubelet_size) * self.num_patches_per_frame
# use this variable to define bool_masked_pos
__A : int = int(mask_ratio * self.seq_length )
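        # with the defaults above that is (10 // 2) ** 2 = 25 patches per frame,
        # (2 // 2) * 25 = 25 tokens in total, and int(0.9 * 25) = 22 masked tokens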
def UpperCamelCase__( self ):
'''simple docstring'''
__A : List[str] = floats_tensor(
[self.batch_size, self.num_frames, self.num_channels, self.image_size, self.image_size] )
__A : Tuple = None
if self.use_labels:
__A : int = ids_tensor([self.batch_size] , self.type_sequence_label_size )
__A : Dict = self.get_config()
return config, pixel_values, labels
def UpperCamelCase__( self ):
'''simple docstring'''
return VideoMAEConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , num_frames=self.num_frames , tubelet_size=self.tubelet_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=__lowerCamelCase , initializer_range=self.initializer_range , )
def UpperCamelCase__( self , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase ):
'''simple docstring'''
__A : int = VideoMAEModel(config=__lowerCamelCase )
model.to(__lowerCamelCase )
model.eval()
__A : Any = model(__lowerCamelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def UpperCamelCase__( self , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase ):
'''simple docstring'''
__A : Union[str, Any] = VideoMAEForPreTraining(__lowerCamelCase )
model.to(__lowerCamelCase )
model.eval()
# important: each video needs to have the same number of masked patches
# hence we define a single mask, which we then repeat for each example in the batch
__A : Optional[int] = torch.ones((self.num_masks,) )
__A : Optional[Any] = torch.cat([mask, torch.zeros(self.seq_length - mask.size(0 ) )] )
__A : Optional[Any] = mask.expand(self.batch_size , -1 ).bool()
__A : Optional[int] = model(__lowerCamelCase , __lowerCamelCase )
# model only returns predictions for masked patches
__A : Union[str, Any] = mask.sum().item()
__A : Any = 3 * self.tubelet_size * self.patch_size**2
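        # each masked position is reconstructed as a flattened tubelet of raw pixels,
        # i.e. num_channels (3) * tubelet_size * patch_size**2 values per token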
self.parent.assertEqual(result.logits.shape , (self.batch_size, num_masked_patches, decoder_num_labels) )
def UpperCamelCase__( self ):
'''simple docstring'''
__A : Tuple = self.prepare_config_and_inputs()
__A , __A , __A : Union[str, Any] = config_and_inputs
__A : Tuple = {'''pixel_values''': pixel_values}
return config, inputs_dict
@require_torch
class __snake_case ( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , unittest.TestCase ):
"""simple docstring"""
_lowerCamelCase = (
(VideoMAEModel, VideoMAEForPreTraining, VideoMAEForVideoClassification) if is_torch_available() else ()
)
_lowerCamelCase = (
{"""feature-extraction""": VideoMAEModel, """video-classification""": VideoMAEForVideoClassification}
if is_torch_available()
else {}
)
_lowerCamelCase = False
_lowerCamelCase = False
_lowerCamelCase = False
_lowerCamelCase = False
def UpperCamelCase__( self ):
'''simple docstring'''
__A : List[Any] = VideoMAEModelTester(self )
__A : Any = ConfigTester(self , config_class=__lowerCamelCase , has_text_modality=__lowerCamelCase , hidden_size=37 )
def UpperCamelCase__( self , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase=False ):
'''simple docstring'''
__A : List[Any] = copy.deepcopy(__lowerCamelCase )
if model_class == VideoMAEForPreTraining:
# important: each video needs to have the same number of masked patches
# hence we define a single mask, which we then repeat for each example in the batch
__A : Union[str, Any] = torch.ones((self.model_tester.num_masks,) )
__A : List[str] = torch.cat([mask, torch.zeros(self.model_tester.seq_length - mask.size(0 ) )] )
__A : Union[str, Any] = mask.expand(self.model_tester.batch_size , -1 ).bool()
__A : int = bool_masked_pos.to(__lowerCamelCase )
if return_labels:
if model_class in [
*get_values(__lowerCamelCase ),
]:
__A : Optional[Any] = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=__lowerCamelCase )
return inputs_dict
def UpperCamelCase__( self ):
'''simple docstring'''
self.config_tester.run_common_tests()
@unittest.skip(reason='''VideoMAE does not use inputs_embeds''' )
def UpperCamelCase__( self ):
'''simple docstring'''
pass
def UpperCamelCase__( self ):
'''simple docstring'''
__A , __A : Any = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__A : Tuple = model_class(__lowerCamelCase )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
__A : Any = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(__lowerCamelCase , nn.Linear ) )
def UpperCamelCase__( self ):
'''simple docstring'''
__A , __A : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__A : Optional[Any] = model_class(__lowerCamelCase )
__A : int = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__A : Union[str, Any] = [*signature.parameters.keys()]
__A : Dict = ['''pixel_values''']
self.assertListEqual(arg_names[:1] , __lowerCamelCase )
def UpperCamelCase__( self ):
'''simple docstring'''
__A : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__lowerCamelCase )
def UpperCamelCase__( self ):
'''simple docstring'''
__A : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_pretraining(*__lowerCamelCase )
@slow
def UpperCamelCase__( self ):
'''simple docstring'''
for model_name in VIDEOMAE_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__A : int = VideoMAEModel.from_pretrained(__lowerCamelCase )
self.assertIsNotNone(__lowerCamelCase )
def UpperCamelCase__( self ):
'''simple docstring'''
if not self.has_attentions:
pass
else:
__A , __A : str = self.model_tester.prepare_config_and_inputs_for_common()
__A : List[Any] = True
for model_class in self.all_model_classes:
__A : List[str] = self.model_tester.seq_length - self.model_tester.num_masks
__A : Dict = (
num_visible_patches if model_class == VideoMAEForPreTraining else self.model_tester.seq_length
)
__A : Dict = True
__A : Optional[Any] = False
__A : List[Any] = True
__A : Dict = model_class(__lowerCamelCase )
model.to(__lowerCamelCase )
model.eval()
with torch.no_grad():
__A : Optional[Any] = model(**self._prepare_for_class(__lowerCamelCase , __lowerCamelCase ) )
__A : Optional[Any] = outputs.attentions
self.assertEqual(len(__lowerCamelCase ) , self.model_tester.num_hidden_layers )
# check that output_attentions also work using config
del inputs_dict["output_attentions"]
__A : str = True
__A : str = model_class(__lowerCamelCase )
model.to(__lowerCamelCase )
model.eval()
with torch.no_grad():
__A : Any = model(**self._prepare_for_class(__lowerCamelCase , __lowerCamelCase ) )
__A : List[str] = outputs.attentions
self.assertEqual(len(__lowerCamelCase ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_len, seq_len] , )
__A : Dict = len(__lowerCamelCase )
# Check attention is always last and order is fine
__A : List[str] = True
__A : int = True
__A : List[str] = model_class(__lowerCamelCase )
model.to(__lowerCamelCase )
model.eval()
with torch.no_grad():
__A : Dict = model(**self._prepare_for_class(__lowerCamelCase , __lowerCamelCase ) )
self.assertEqual(out_len + 1 , len(__lowerCamelCase ) )
__A : List[str] = outputs.attentions
self.assertEqual(len(__lowerCamelCase ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(self_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_len, seq_len] , )
def UpperCamelCase__( self ):
'''simple docstring'''
def check_hidden_states_output(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase ):
__A : Any = model_class(__lowerCamelCase )
model.to(__lowerCamelCase )
model.eval()
with torch.no_grad():
__A : Dict = model(**self._prepare_for_class(__lowerCamelCase , __lowerCamelCase ) )
__A : Optional[Any] = outputs.hidden_states
__A : Union[str, Any] = self.model_tester.num_hidden_layers + 1
self.assertEqual(len(__lowerCamelCase ) , __lowerCamelCase )
__A : List[Any] = self.model_tester.seq_length - self.model_tester.num_masks
__A : str = num_visible_patches if model_class == VideoMAEForPreTraining else self.model_tester.seq_length
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [seq_length, self.model_tester.hidden_size] , )
__A , __A : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__A : Any = True
check_hidden_states_output(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
__A : Any = True
check_hidden_states_output(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase )
@unittest.skip('''Will be fixed soon by reducing the size of the model used for common tests.''' )
def UpperCamelCase__( self ):
'''simple docstring'''
pass
def __lowercase ( ) ->str:
'''simple docstring'''
__A : Optional[int] = hf_hub_download(
repo_id='''hf-internal-testing/spaghetti-video''' ,filename='''eating_spaghetti.npy''' ,repo_type='''dataset''' )
__A : str = np.load(snake_case_ )
return list(snake_case_ )
@require_torch
@require_vision
class __snake_case ( unittest.TestCase ):
"""simple docstring"""
@cached_property
def UpperCamelCase__( self ):
'''simple docstring'''
return (
VideoMAEImageProcessor(image_mean=[0.5, 0.5, 0.5] , image_std=[0.5, 0.5, 0.5] )
if is_vision_available()
else None
)
@slow
def UpperCamelCase__( self ):
'''simple docstring'''
__A : List[str] = VideoMAEForVideoClassification.from_pretrained('''MCG-NJU/videomae-base-finetuned-kinetics''' ).to(
__lowerCamelCase )
__A : Union[str, Any] = self.default_image_processor
__A : str = prepare_video()
__A : Optional[Any] = image_processor(__lowerCamelCase , return_tensors='''pt''' ).to(__lowerCamelCase )
# forward pass
with torch.no_grad():
__A : str = model(**__lowerCamelCase )
# verify the logits
__A : Optional[int] = torch.Size((1, 400) )
self.assertEqual(outputs.logits.shape , __lowerCamelCase )
__A : List[Any] = torch.tensor([0.3_6_6_9, -0.0_6_8_8, -0.2_4_2_1] ).to(__lowerCamelCase )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , __lowerCamelCase , atol=1e-4 ) )
@slow
def UpperCamelCase__( self ):
'''simple docstring'''
__A : Any = VideoMAEForPreTraining.from_pretrained('''MCG-NJU/videomae-base-short''' ).to(__lowerCamelCase )
__A : int = self.default_image_processor
__A : Tuple = prepare_video()
__A : Any = image_processor(__lowerCamelCase , return_tensors='''pt''' ).to(__lowerCamelCase )
# add boolean mask, indicating which patches to mask
__A : Optional[int] = hf_hub_download(repo_id='''hf-internal-testing/bool-masked-pos''' , filename='''bool_masked_pos.pt''' )
__A : Optional[Any] = torch.load(__lowerCamelCase )
# forward pass
with torch.no_grad():
__A : Optional[Any] = model(**__lowerCamelCase )
# verify the logits
__A : int = torch.Size([1, 1408, 1536] )
__A : Dict = torch.tensor(
[[0.7_9_9_4, 0.9_6_1_2, 0.8_5_0_8], [0.7_4_0_1, 0.8_9_5_8, 0.8_3_0_2], [0.5_8_6_2, 0.7_4_6_8, 0.7_3_2_5]] , device=__lowerCamelCase )
self.assertEqual(outputs.logits.shape , __lowerCamelCase )
self.assertTrue(torch.allclose(outputs.logits[0, :3, :3] , __lowerCamelCase , atol=1e-4 ) )
# verify the loss (`config.norm_pix_loss` = `True`)
__A : Any = torch.tensor([0.5_1_4_2] , device=__lowerCamelCase )
self.assertTrue(torch.allclose(outputs.loss , __lowerCamelCase , atol=1e-4 ) )
# verify the loss (`config.norm_pix_loss` = `False`)
__A : List[Any] = VideoMAEForPreTraining.from_pretrained('''MCG-NJU/videomae-base-short''' , norm_pix_loss=__lowerCamelCase ).to(
__lowerCamelCase )
with torch.no_grad():
__A : Tuple = model(**__lowerCamelCase )
        __A : List[Any] = torch.tensor([0.6_4_6_9] , device=__lowerCamelCase )
self.assertTrue(torch.allclose(outputs.loss , __lowerCamelCase , atol=1e-4 ) )
| 291 |
"""simple docstring"""
import json
import os
import unittest
from transformers.models.xlm.tokenization_xlm import VOCAB_FILES_NAMES, XLMTokenizer
from transformers.testing_utils import slow
from ...test_tokenization_common import TokenizerTesterMixin
class __snake_case ( SCREAMING_SNAKE_CASE__ , unittest.TestCase ):
"""simple docstring"""
_lowerCamelCase = XLMTokenizer
_lowerCamelCase = False
def UpperCamelCase__( self ):
'''simple docstring'''
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
__A : Tuple = [
'''l''',
'''o''',
'''w''',
'''e''',
'''r''',
'''s''',
'''t''',
'''i''',
'''d''',
'''n''',
'''w</w>''',
'''r</w>''',
'''t</w>''',
'''lo''',
'''low''',
'''er</w>''',
'''low</w>''',
'''lowest</w>''',
'''newer</w>''',
'''wider</w>''',
'''<unk>''',
]
__A : Dict = dict(zip(__lowerCamelCase , range(len(__lowerCamelCase ) ) ) )
__A : Union[str, Any] = ['''l o 123''', '''lo w 1456''', '''e r</w> 1789''', '''''']
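        # each merge line is "<left> <right> <count>"; e.g. "l o 123" merges "l" + "o"
        # into "lo" (the trailing number is ignored when the merges file is parsed)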
__A : Optional[Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
__A : Optional[Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''merges_file'''] )
with open(self.vocab_file , '''w''' ) as fp:
fp.write(json.dumps(__lowerCamelCase ) )
with open(self.merges_file , '''w''' ) as fp:
fp.write('''\n'''.join(__lowerCamelCase ) )
def UpperCamelCase__( self , __lowerCamelCase ):
'''simple docstring'''
__A : Optional[int] = '''lower newer'''
__A : int = '''lower newer'''
return input_text, output_text
def UpperCamelCase__( self ):
'''simple docstring'''
__A : Union[str, Any] = XLMTokenizer(self.vocab_file , self.merges_file )
__A : Optional[Any] = '''lower'''
__A : Any = ['''low''', '''er</w>''']
__A : Tuple = tokenizer.tokenize(__lowerCamelCase )
self.assertListEqual(__lowerCamelCase , __lowerCamelCase )
__A : str = tokens + ['''<unk>''']
__A : List[str] = [14, 15, 20]
self.assertListEqual(tokenizer.convert_tokens_to_ids(__lowerCamelCase ) , __lowerCamelCase )
@slow
def UpperCamelCase__( self ):
'''simple docstring'''
__A : Optional[int] = XLMTokenizer.from_pretrained('''xlm-mlm-en-2048''' )
__A : Union[str, Any] = tokenizer.encode('''sequence builders''' , add_special_tokens=__lowerCamelCase )
__A : int = tokenizer.encode('''multi-sequence build''' , add_special_tokens=__lowerCamelCase )
__A : int = tokenizer.build_inputs_with_special_tokens(__lowerCamelCase )
__A : List[Any] = tokenizer.build_inputs_with_special_tokens(__lowerCamelCase , __lowerCamelCase )
assert encoded_sentence == [0] + text + [1]
assert encoded_pair == [0] + text + [1] + text_a + [1]
| 291 | 1 |
'''simple docstring'''
from typing import Any
def snake_case_ ( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , ):
"""simple docstring"""
_validation(
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , )
    # Creates the data structures and fills in the initial step
_SCREAMING_SNAKE_CASE : dict = {}
_SCREAMING_SNAKE_CASE : dict = {}
for state in states_space:
_SCREAMING_SNAKE_CASE : List[Any] = observations_space[0]
_SCREAMING_SNAKE_CASE : Optional[int] = (
initial_probabilities[state] * emission_probabilities[state][observation]
)
_SCREAMING_SNAKE_CASE : Any = None
# Fills the data structure with the probabilities of
# different transitions and pointers to previous states
for o in range(1 , len(SCREAMING_SNAKE_CASE__ ) ):
_SCREAMING_SNAKE_CASE : List[Any] = observations_space[o]
_SCREAMING_SNAKE_CASE : Optional[int] = observations_space[o - 1]
for state in states_space:
            # Calculates the argmax of the probability function
_SCREAMING_SNAKE_CASE : Optional[int] = """"""
_SCREAMING_SNAKE_CASE : Optional[Any] = -1
for k_state in states_space:
_SCREAMING_SNAKE_CASE : int = (
probabilities[(k_state, prior_observation)]
* transition_probabilities[k_state][state]
* emission_probabilities[state][observation]
)
if probability > max_probability:
_SCREAMING_SNAKE_CASE : Union[str, Any] = probability
_SCREAMING_SNAKE_CASE : int = k_state
# Update probabilities and pointers dicts
_SCREAMING_SNAKE_CASE : Any = (
probabilities[(arg_max, prior_observation)]
* transition_probabilities[arg_max][state]
* emission_probabilities[state][observation]
)
_SCREAMING_SNAKE_CASE : List[str] = arg_max
# The final observation
_SCREAMING_SNAKE_CASE : str = observations_space[len(SCREAMING_SNAKE_CASE__ ) - 1]
    # argmax for the given final observation
_SCREAMING_SNAKE_CASE : List[str] = """"""
_SCREAMING_SNAKE_CASE : List[Any] = -1
for k_state in states_space:
_SCREAMING_SNAKE_CASE : Dict = probabilities[(k_state, final_observation)]
if probability > max_probability:
_SCREAMING_SNAKE_CASE : Optional[int] = probability
_SCREAMING_SNAKE_CASE : str = k_state
_SCREAMING_SNAKE_CASE : List[Any] = arg_max
# Process pointers backwards
_SCREAMING_SNAKE_CASE : str = last_state
_SCREAMING_SNAKE_CASE : List[Any] = []
for o in range(len(SCREAMING_SNAKE_CASE__ ) - 1 , -1 , -1 ):
result.append(SCREAMING_SNAKE_CASE__ )
_SCREAMING_SNAKE_CASE : int = pointers[previous, observations_space[o]]
result.reverse()
return result
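# A minimal sketch of the expected inputs, using the classic healthy/fever HMM;
# the call itself is left as a comment since only the argument contract matters here:
#
#   observations = ["normal", "cold", "dizzy"]
#   states = ["Healthy", "Fever"]
#   initial = {"Healthy": 0.6, "Fever": 0.4}
#   transition = {"Healthy": {"Healthy": 0.7, "Fever": 0.3},
#                 "Fever": {"Healthy": 0.4, "Fever": 0.6}}
#   emission = {"Healthy": {"normal": 0.5, "cold": 0.4, "dizzy": 0.1},
#               "Fever": {"normal": 0.1, "cold": 0.3, "dizzy": 0.6}}
#   # most likely state sequence: ["Healthy", "Healthy", "Fever"]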
def snake_case_ ( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , ):
"""simple docstring"""
_validate_not_empty(
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , )
_validate_lists(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
_validate_dicts(
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
def snake_case_ ( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , ):
"""simple docstring"""
if not all(
[
observations_space,
states_space,
initial_probabilities,
transition_probabilities,
emission_probabilities,
] ):
raise ValueError("""There's an empty parameter""" )
def snake_case_ ( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
_validate_list(SCREAMING_SNAKE_CASE__ , """observations_space""" )
_validate_list(SCREAMING_SNAKE_CASE__ , """states_space""" )
def snake_case_ ( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
if not isinstance(_object , SCREAMING_SNAKE_CASE__ ):
_SCREAMING_SNAKE_CASE : List[str] = f"""{var_name} must be a list"""
raise ValueError(SCREAMING_SNAKE_CASE__ )
else:
for x in _object:
if not isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
_SCREAMING_SNAKE_CASE : List[str] = f"""{var_name} must be a list of strings"""
raise ValueError(SCREAMING_SNAKE_CASE__ )
def snake_case_ ( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , ):
"""simple docstring"""
_validate_dict(SCREAMING_SNAKE_CASE__ , """initial_probabilities""" , SCREAMING_SNAKE_CASE__ )
_validate_nested_dict(SCREAMING_SNAKE_CASE__ , """transition_probabilities""" )
_validate_nested_dict(SCREAMING_SNAKE_CASE__ , """emission_probabilities""" )
def snake_case_ ( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
_validate_dict(_object , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
for x in _object.values():
_validate_dict(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
def snake_case_ ( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = False ):
"""simple docstring"""
if not isinstance(_object , SCREAMING_SNAKE_CASE__ ):
_SCREAMING_SNAKE_CASE : str = f"""{var_name} must be a dict"""
raise ValueError(SCREAMING_SNAKE_CASE__ )
if not all(isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) for x in _object ):
_SCREAMING_SNAKE_CASE : Optional[int] = f"""{var_name} all keys must be strings"""
raise ValueError(SCREAMING_SNAKE_CASE__ )
if not all(isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) for x in _object.values() ):
_SCREAMING_SNAKE_CASE : Tuple = """nested dictionary """ if nested else """"""
_SCREAMING_SNAKE_CASE : Tuple = f"""{var_name} {nested_text}all values must be {value_type.__name__}"""
raise ValueError(SCREAMING_SNAKE_CASE__ )
if __name__ == "__main__":
from doctest import testmod
testmod()
| 200 |
'''simple docstring'''
class lowercase__ :
'''simple docstring'''
def __init__( self , __snake_case = "" , __snake_case = False ):
# Mapping from the first character of the prefix of the node
_SCREAMING_SNAKE_CASE : dict[str, RadixNode] = {}
# A node will be a leaf if the tree contains its word
_SCREAMING_SNAKE_CASE : List[Any] = is_leaf
_SCREAMING_SNAKE_CASE : Optional[Any] = prefix
def UpperCAmelCase_ ( self , __snake_case ):
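        # longest-common-prefix split: e.g. prefix="band", word="banana" returns
        # ("ban", "d", "ana"), i.e. common part, leftover prefix, leftover word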
_SCREAMING_SNAKE_CASE : Optional[int] = 0
for q, w in zip(self.prefix , __snake_case ):
if q != w:
break
x += 1
return self.prefix[:x], self.prefix[x:], word[x:]
def UpperCAmelCase_ ( self , __snake_case ):
for word in words:
self.insert(__snake_case )
def UpperCAmelCase_ ( self , __snake_case ):
# Case 1: If the word is the prefix of the node
        # Solution: We set the current node as a leaf
if self.prefix == word:
_SCREAMING_SNAKE_CASE : List[Any] = True
# Case 2: The node has no edges that have a prefix to the word
# Solution: We create an edge from the current node to a new one
# containing the word
elif word[0] not in self.nodes:
_SCREAMING_SNAKE_CASE : List[str] = RadixNode(prefix=__snake_case , is_leaf=__snake_case )
else:
_SCREAMING_SNAKE_CASE : int = self.nodes[word[0]]
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE : Optional[Any] = incoming_node.match(
__snake_case )
# Case 3: The node prefix is equal to the matching
# Solution: We insert remaining word on the next node
if remaining_prefix == "":
self.nodes[matching_string[0]].insert(__snake_case )
            # Case 4: The word is greater than or equal to the matching
# Solution: Create a node in between both nodes, change
# prefixes and add the new node for the remaining word
else:
_SCREAMING_SNAKE_CASE : Optional[Any] = remaining_prefix
_SCREAMING_SNAKE_CASE : Union[str, Any] = self.nodes[matching_string[0]]
_SCREAMING_SNAKE_CASE : List[Any] = RadixNode(__snake_case , __snake_case )
_SCREAMING_SNAKE_CASE : Union[str, Any] = aux_node
if remaining_word == "":
_SCREAMING_SNAKE_CASE : Union[str, Any] = True
else:
self.nodes[matching_string[0]].insert(__snake_case )
def UpperCAmelCase_ ( self , __snake_case ):
_SCREAMING_SNAKE_CASE : Union[str, Any] = self.nodes.get(word[0] , __snake_case )
if not incoming_node:
return False
else:
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE : Tuple = incoming_node.match(
__snake_case )
# If there is remaining prefix, the word can't be on the tree
if remaining_prefix != "":
return False
# This applies when the word and the prefix are equal
elif remaining_word == "":
return incoming_node.is_leaf
# We have word remaining so we check the next node
else:
return incoming_node.find(__snake_case )
def UpperCAmelCase_ ( self , __snake_case ):
_SCREAMING_SNAKE_CASE : Tuple = self.nodes.get(word[0] , __snake_case )
if not incoming_node:
return False
else:
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE : List[Any] = incoming_node.match(
__snake_case )
# If there is remaining prefix, the word can't be on the tree
if remaining_prefix != "":
return False
# We have word remaining so we check the next node
elif remaining_word != "":
return incoming_node.delete(__snake_case )
else:
# If it is not a leaf, we don't have to delete
if not incoming_node.is_leaf:
return False
else:
                    # We delete the node if no edges go from it
if len(incoming_node.nodes ) == 0:
del self.nodes[word[0]]
# We merge the current node with its only child
if len(self.nodes ) == 1 and not self.is_leaf:
_SCREAMING_SNAKE_CASE : Optional[Any] = list(self.nodes.values() )[0]
_SCREAMING_SNAKE_CASE : Optional[Any] = merging_node.is_leaf
self.prefix += merging_node.prefix
_SCREAMING_SNAKE_CASE : Tuple = merging_node.nodes
# If there is more than 1 edge, we just mark it as non-leaf
elif len(incoming_node.nodes ) > 1:
_SCREAMING_SNAKE_CASE : List[str] = False
# If there is 1 edge, we merge it with its child
else:
_SCREAMING_SNAKE_CASE : int = list(incoming_node.nodes.values() )[0]
_SCREAMING_SNAKE_CASE : Tuple = merging_node.is_leaf
incoming_node.prefix += merging_node.prefix
_SCREAMING_SNAKE_CASE : str = merging_node.nodes
return True
def UpperCAmelCase_ ( self , __snake_case = 0 ):
if self.prefix != "":
print("""-""" * height , self.prefix , """ (leaf)""" if self.is_leaf else """""" )
for value in self.nodes.values():
value.print_tree(height + 1 )
def snake_case_ ( ):
"""simple docstring"""
_SCREAMING_SNAKE_CASE : Union[str, Any] = """banana bananas bandana band apple all beast""".split()
_SCREAMING_SNAKE_CASE : Optional[Any] = RadixNode()
root.insert_many(SCREAMING_SNAKE_CASE__ )
assert all(root.find(SCREAMING_SNAKE_CASE__ ) for word in words )
assert not root.find("""bandanas""" )
assert not root.find("""apps""" )
root.delete("""all""" )
assert not root.find("""all""" )
root.delete("""banana""" )
assert not root.find("""banana""" )
assert root.find("""bananas""" )
return True
def snake_case_ ( ):
"""simple docstring"""
assert test_trie()
def snake_case_ ( ):
"""simple docstring"""
_SCREAMING_SNAKE_CASE : Tuple = RadixNode()
_SCREAMING_SNAKE_CASE : Optional[int] = """banana bananas bandanas bandana band apple all beast""".split()
root.insert_many(SCREAMING_SNAKE_CASE__ )
print("""Words:""" , SCREAMING_SNAKE_CASE__ )
print("""Tree:""" )
root.print_tree()
if __name__ == "__main__":
main()
| 200 | 1 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
lowerCAmelCase_ = {'configuration_yolos': ['YOLOS_PRETRAINED_CONFIG_ARCHIVE_MAP', 'YolosConfig', 'YolosOnnxConfig']}
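# the dict above is the lazy import structure: it maps submodule names to the symbols
# they export, and _LazyModule (bottom of the file) defers the real imports until one
# of those symbols is first accessed, keeping the initial package import cheap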
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase_ = ['YolosFeatureExtractor']
lowerCAmelCase_ = ['YolosImageProcessor']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase_ = [
'YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST',
'YolosForObjectDetection',
'YolosModel',
'YolosPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_yolos import YOLOS_PRETRAINED_CONFIG_ARCHIVE_MAP, YolosConfig, YolosOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_yolos import YolosFeatureExtractor
from .image_processing_yolos import YolosImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_yolos import (
YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST,
YolosForObjectDetection,
YolosModel,
YolosPreTrainedModel,
)
else:
import sys
lowerCAmelCase_ = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 302 |
"""simple docstring"""
import faiss # noqa: F401 # Here to have a nice missing dependency error message early on
import numpy # noqa: F401 # Here to have a nice missing dependency error message early on
import requests # noqa: F401 # Here to have a nice missing dependency error message early on
import sklearn # noqa: F401 # Here to have a nice missing dependency error message early on
import tqdm # noqa: F401 # Here to have a nice missing dependency error message early on
from mauve import compute_mauve # From: mauve-text
import datasets
lowerCAmelCase_ = '\\n@inproceedings{pillutla-etal:mauve:neurips2021,\n title={MAUVE: Measuring the Gap Between Neural Text and Human Text using Divergence Frontiers},\n author={Pillutla, Krishna and Swayamdipta, Swabha and Zellers, Rowan and Thickstun, John and Welleck, Sean and Choi, Yejin and Harchaoui, Zaid},\n booktitle = {NeurIPS},\n year = {2021}\n}\n\n'
lowerCAmelCase_ = '\\nMAUVE is a library built on PyTorch and HuggingFace Transformers to measure the gap between neural text and human text with the eponymous MAUVE measure.\n\nMAUVE summarizes both Type I and Type II errors measured softly using Kullback–Leibler (KL) divergences.\n\nFor details, see the MAUVE paper: https://arxiv.org/abs/2102.01454 (Neurips, 2021).\n\nThis metrics is a wrapper around the official implementation of MAUVE:\nhttps://github.com/krishnap25/mauve\n'
lowerCAmelCase_ = '\nCalculates MAUVE scores between two lists of generated text and reference text.\nArgs:\n predictions: list of generated text to score. Each predictions\n should be a string with tokens separated by spaces.\n references: list of reference for each prediction. Each\n reference should be a string with tokens separated by spaces.\nOptional Args:\n num_buckets: the size of the histogram to quantize P and Q. Options: \'auto\' (default) or an integer\n pca_max_data: the number data points to use for PCA dimensionality reduction prior to clustering. If -1, use all the data. Default -1\n kmeans_explained_var: amount of variance of the data to keep in dimensionality reduction by PCA. Default 0.9\n kmeans_num_redo: number of times to redo k-means clustering (the best objective is kept). Default 5\n kmeans_max_iter: maximum number of k-means iterations. Default 500\n featurize_model_name: name of the model from which features are obtained. Default \'gpt2-large\' Use one of [\'gpt2\', \'gpt2-medium\', \'gpt2-large\', \'gpt2-xl\'].\n device_id: Device for featurization. Supply a GPU id (e.g. 0 or 3) to use GPU. If no GPU with this id is found, use CPU\n max_text_length: maximum number of tokens to consider. Default 1024\n divergence_curve_discretization_size: Number of points to consider on the divergence curve. Default 25\n mauve_scaling_factor: "c" from the paper. Default 5.\n verbose: If True (default), print running time updates\n seed: random seed to initialize k-means cluster assignments.\nReturns:\n mauve: MAUVE score, a number between 0 and 1. Larger values indicate that P and Q are closer,\n frontier_integral: Frontier Integral, a number between 0 and 1. Smaller values indicate that P and Q are closer,\n divergence_curve: a numpy.ndarray of shape (m, 2); plot it with matplotlib to view the divergence curve,\n p_hist: a discrete distribution, which is a quantized version of the text distribution p_text,\n q_hist: same as above, but with q_text.\nExamples:\n\n >>> # faiss segfaults in doctest for some reason, so the .compute call is not tested with doctest\n >>> import datasets\n >>> mauve = datasets.load_metric(\'mauve\')\n >>> predictions = ["hello there", "general kenobi"]\n >>> references = ["hello there", "general kenobi"]\n >>> out = mauve.compute(predictions=predictions, references=references) # doctest: +SKIP\n >>> print(out.mauve) # doctest: +SKIP\n 1.0\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION ,_KWARGS_DESCRIPTION )
class __A ( datasets.Metric ):
'''simple docstring'''
def UpperCAmelCase ( self : List[Any] ) -> Union[str, Any]:
"""simple docstring"""
return datasets.MetricInfo(
description=_DESCRIPTION ,citation=_CITATION ,homepage='''https://github.com/krishnap25/mauve''' ,inputs_description=_KWARGS_DESCRIPTION ,features=datasets.Features(
{
'''predictions''': datasets.Value('''string''' ,id='''sequence''' ),
'''references''': datasets.Value('''string''' ,id='''sequence''' ),
} ) ,codebase_urls=['''https://github.com/krishnap25/mauve'''] ,reference_urls=[
'''https://arxiv.org/abs/2102.01454''',
'''https://github.com/krishnap25/mauve''',
] ,)
def UpperCAmelCase ( self : Dict ,_snake_case : Optional[Any] ,_snake_case : Any ,_snake_case : List[str]=None ,_snake_case : Tuple=None ,_snake_case : List[Any]=None ,_snake_case : Any=None ,_snake_case : Optional[int]="auto" ,_snake_case : Optional[int]=-1 ,_snake_case : Optional[int]=0.9 ,_snake_case : Any=5 ,_snake_case : Dict=500 ,_snake_case : Optional[int]="gpt2-large" ,_snake_case : Optional[Any]=-1 ,_snake_case : Tuple=1_024 ,_snake_case : Optional[int]=25 ,_snake_case : Dict=5 ,_snake_case : int=True ,_snake_case : Union[str, Any]=25 ,) -> Any:
"""simple docstring"""
lowercase__ : Any = compute_mauve(
p_text=_snake_case ,q_text=_snake_case ,p_features=_snake_case ,q_features=_snake_case ,p_tokens=_snake_case ,q_tokens=_snake_case ,num_buckets=_snake_case ,pca_max_data=_snake_case ,kmeans_explained_var=_snake_case ,kmeans_num_redo=_snake_case ,kmeans_max_iter=_snake_case ,featurize_model_name=_snake_case ,device_id=_snake_case ,max_text_length=_snake_case ,divergence_curve_discretization_size=_snake_case ,mauve_scaling_factor=_snake_case ,verbose=_snake_case ,seed=_snake_case ,)
return out
| 302 | 1 |
"""simple docstring"""
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
StableDiffusionSAGPipeline,
UNetaDConditionModel,
)
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class lowerCAmelCase__ ( UpperCAmelCase__ , UpperCAmelCase__ , unittest.TestCase ):
'''simple docstring'''
__UpperCamelCase = StableDiffusionSAGPipeline
__UpperCamelCase = TEXT_TO_IMAGE_PARAMS
__UpperCamelCase = TEXT_TO_IMAGE_BATCH_PARAMS
__UpperCamelCase = TEXT_TO_IMAGE_IMAGE_PARAMS
__UpperCamelCase = TEXT_TO_IMAGE_IMAGE_PARAMS
__UpperCamelCase = False
def _SCREAMING_SNAKE_CASE ( self : str):
'''simple docstring'''
torch.manual_seed(0)
SCREAMING_SNAKE_CASE_ : List[Any] = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('''DownBlock2D''', '''CrossAttnDownBlock2D''') , up_block_types=('''CrossAttnUpBlock2D''', '''UpBlock2D''') , cross_attention_dim=32 , )
SCREAMING_SNAKE_CASE_ : Optional[int] = DDIMScheduler(
beta_start=0.0_00_85 , beta_end=0.0_12 , beta_schedule='''scaled_linear''' , clip_sample=lowercase_ , set_alpha_to_one=lowercase_ , )
torch.manual_seed(0)
SCREAMING_SNAKE_CASE_ : Any = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['''DownEncoderBlock2D''', '''DownEncoderBlock2D'''] , up_block_types=['''UpDecoderBlock2D''', '''UpDecoderBlock2D'''] , latent_channels=4 , )
torch.manual_seed(0)
SCREAMING_SNAKE_CASE_ : str = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , )
SCREAMING_SNAKE_CASE_ : List[Any] = CLIPTextModel(lowercase_)
SCREAMING_SNAKE_CASE_ : int = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''')
SCREAMING_SNAKE_CASE_ : str = {
'''unet''': unet,
'''scheduler''': scheduler,
'''vae''': vae,
'''text_encoder''': text_encoder,
'''tokenizer''': tokenizer,
'''safety_checker''': None,
'''feature_extractor''': None,
}
return components
def _SCREAMING_SNAKE_CASE ( self : Any , lowercase_ : Optional[Any] , lowercase_ : Tuple=0):
'''simple docstring'''
if str(lowercase_).startswith('''mps'''):
SCREAMING_SNAKE_CASE_ : Union[str, Any] = torch.manual_seed(lowercase_)
else:
SCREAMING_SNAKE_CASE_ : str = torch.Generator(device=lowercase_).manual_seed(lowercase_)
SCREAMING_SNAKE_CASE_ : str = {
'''prompt''': '''.''',
'''generator''': generator,
'''num_inference_steps''': 2,
'''guidance_scale''': 1.0,
'''sag_scale''': 1.0,
'''output_type''': '''numpy''',
}
return inputs
def _SCREAMING_SNAKE_CASE ( self : Optional[int]):
'''simple docstring'''
super().test_inference_batch_single_identical(expected_max_diff=3e-3)
@slow
@require_torch_gpu
class lowerCAmelCase__ ( unittest.TestCase ):
'''simple docstring'''
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any]):
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def _SCREAMING_SNAKE_CASE ( self : Optional[Any]):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ : Optional[Any] = StableDiffusionSAGPipeline.from_pretrained('''CompVis/stable-diffusion-v1-4''')
SCREAMING_SNAKE_CASE_ : List[str] = sag_pipe.to(lowercase_)
sag_pipe.set_progress_bar_config(disable=lowercase_)
SCREAMING_SNAKE_CASE_ : Dict = '''.'''
SCREAMING_SNAKE_CASE_ : int = torch.manual_seed(0)
SCREAMING_SNAKE_CASE_ : Union[str, Any] = sag_pipe(
[prompt] , generator=lowercase_ , guidance_scale=7.5 , sag_scale=1.0 , num_inference_steps=20 , output_type='''np''')
SCREAMING_SNAKE_CASE_ : Optional[int] = output.images
SCREAMING_SNAKE_CASE_ : Optional[int] = image[0, -3:, -3:, -1]
assert image.shape == (1, 512, 512, 3)
SCREAMING_SNAKE_CASE_ : Dict = np.array([0.15_68, 0.17_38, 0.16_95, 0.16_93, 0.15_07, 0.17_05, 0.15_47, 0.17_51, 0.19_49])
assert np.abs(image_slice.flatten() - expected_slice).max() < 5e-2
def _SCREAMING_SNAKE_CASE ( self : Dict):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ : Tuple = StableDiffusionSAGPipeline.from_pretrained('''stabilityai/stable-diffusion-2-1-base''')
SCREAMING_SNAKE_CASE_ : Dict = sag_pipe.to(lowercase_)
sag_pipe.set_progress_bar_config(disable=lowercase_)
SCREAMING_SNAKE_CASE_ : Dict = '''.'''
SCREAMING_SNAKE_CASE_ : str = torch.manual_seed(0)
SCREAMING_SNAKE_CASE_ : Union[str, Any] = sag_pipe(
[prompt] , generator=lowercase_ , guidance_scale=7.5 , sag_scale=1.0 , num_inference_steps=20 , output_type='''np''')
SCREAMING_SNAKE_CASE_ : List[Any] = output.images
SCREAMING_SNAKE_CASE_ : List[str] = image[0, -3:, -3:, -1]
assert image.shape == (1, 512, 512, 3)
SCREAMING_SNAKE_CASE_ : List[str] = np.array([0.34_59, 0.28_76, 0.25_37, 0.30_02, 0.26_71, 0.21_60, 0.30_26, 0.22_62, 0.23_71])
assert np.abs(image_slice.flatten() - expected_slice).max() < 5e-2
def _SCREAMING_SNAKE_CASE ( self : Dict):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ : Optional[Any] = StableDiffusionSAGPipeline.from_pretrained('''stabilityai/stable-diffusion-2-1-base''')
SCREAMING_SNAKE_CASE_ : List[Any] = sag_pipe.to(lowercase_)
sag_pipe.set_progress_bar_config(disable=lowercase_)
SCREAMING_SNAKE_CASE_ : Optional[int] = '''.'''
SCREAMING_SNAKE_CASE_ : Tuple = torch.manual_seed(0)
SCREAMING_SNAKE_CASE_ : str = sag_pipe(
[prompt] , width=768 , height=512 , generator=lowercase_ , guidance_scale=7.5 , sag_scale=1.0 , num_inference_steps=20 , output_type='''np''' , )
SCREAMING_SNAKE_CASE_ : List[str] = output.images
assert image.shape == (1, 512, 768, 3)
| 91 |
"""simple docstring"""
import copy
import os
import cva
import numpy as np
from matplotlib import pyplot as plt
class _UpperCAmelCase :
def __init__( self : List[Any] ):
snake_case_ : List[str] = ''''''
snake_case_ : Tuple = ''''''
snake_case_ : int = []
snake_case_ : Optional[int] = 0
snake_case_ : Optional[Any] = 256
snake_case_ : Tuple = 0
snake_case_ : Tuple = 0
snake_case_ : Optional[Any] = 0
snake_case_ : Any = 0
def _snake_case ( self : Optional[Any] , lowercase_ : List[Any] ):
snake_case_ : List[Any] = cva.imread(lowercase_ , 0 )
snake_case_ : Tuple = copy.deepcopy(self.img )
snake_case_, snake_case_, snake_case_ : List[Any] = plt.hist(self.img.ravel() , 256 , [0, 256] , label='''x''' )
snake_case_ : str = np.sum(lowercase_ )
for i in range(len(lowercase_ ) ):
snake_case_ : Optional[Any] = x[i] / self.k
self.sk += prk
snake_case_ : Any = (self.L - 1) * self.sk
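            # classic histogram-equalization style mapping: sk accumulates the
            # empirical CDF and (L - 1) * sk spreads grey levels across 0..255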
if self.rem != 0:
snake_case_ : Dict = int(last % last )
snake_case_ : Union[str, Any] = int(last + 1 if self.rem >= 0.5 else last )
self.last_list.append(lowercase_ )
snake_case_ : int = int(np.ma.count(self.img ) / self.img[1].size )
snake_case_ : Tuple = self.img[1].size
for i in range(self.number_of_cols ):
for j in range(self.number_of_rows ):
snake_case_ : Union[str, Any] = self.img[j][i]
if num != self.last_list[num]:
snake_case_ : List[str] = self.last_list[num]
cva.imwrite('''output_data/output.jpg''' , self.img )
def _snake_case ( self : Tuple ):
plt.hist(self.img.ravel() , 256 , [0, 256] )
def _snake_case ( self : int ):
cva.imshow('''Output-Image''' , self.img )
cva.imshow('''Input-Image''' , self.original_image )
cva.waitKey(5000 )
cva.destroyAllWindows()
if __name__ == "__main__":
    lowercase__ : Any = os.path.join(os.path.dirname(__file__), '''image_data/input.jpg''')
lowercase__ : Any = ConstantStretch()
stretcher.stretch(file_path)
stretcher.plot_histogram()
stretcher.show_image()
| 264 | 0 |
# This script creates a super tiny model that is useful inside tests, when we just want to test that
# the machinery works, without needing to check the quality of the outcomes.
#
# This version creates a tiny vocab first, and then a tiny model - so the outcome is truly tiny -
# all files ~60KB. As compared to taking a full-size model, reducing to the minimum its layers and
# emb dimensions, but keeping the full vocab + merges files, leading to ~3MB in total for all files.
# The latter is done by `fsmt-make-tiny-model.py`.
#
# It will be used then as "stas/tiny-wmt19-en-ru"
from pathlib import Path
import json
import tempfile
from transformers import FSMTTokenizer, FSMTConfig, FSMTForConditionalGeneration
from transformers.models.fsmt.tokenization_fsmt import VOCAB_FILES_NAMES
_lowerCamelCase : Dict = "tiny-wmt19-en-ru"
# Build
# borrowed from a test
_lowerCamelCase : str = [
"l",
"o",
"w",
"e",
"r",
"s",
"t",
"i",
"d",
"n",
"w</w>",
"r</w>",
"t</w>",
"lo",
"low",
"er</w>",
"low</w>",
"lowest</w>",
"newer</w>",
"wider</w>",
"<unk>",
]
_lowerCamelCase : Optional[int] = dict(zip(vocab, range(len(vocab))))
_lowerCamelCase : Union[str, Any] = ["l o 123", "lo w 1456", "e r</w> 1789", ""]
with tempfile.TemporaryDirectory() as tmpdirname:
_lowerCamelCase : Optional[Any] = Path(tmpdirname)
_lowerCamelCase : Optional[Any] = build_dir / VOCAB_FILES_NAMES["src_vocab_file"]
_lowerCamelCase : str = build_dir / VOCAB_FILES_NAMES["tgt_vocab_file"]
_lowerCamelCase : int = build_dir / VOCAB_FILES_NAMES["merges_file"]
with open(src_vocab_file, "w") as fp:
fp.write(json.dumps(vocab_tokens))
with open(tgt_vocab_file, "w") as fp:
fp.write(json.dumps(vocab_tokens))
with open(merges_file, "w") as fp:
fp.write("\n".join(merges))
    _lowerCamelCase : Optional[int] = FSMTTokenizer(
        langs=["en", "ru"],
        src_vocab_size=len(vocab),
        tgt_vocab_size=len(vocab),
        src_vocab_file=src_vocab_file,
        tgt_vocab_file=tgt_vocab_file,
        merges_file=merges_file,
    )
_lowerCamelCase : List[Any] = FSMTConfig(
langs=["ru", "en"],
src_vocab_size=1_0_0_0,
tgt_vocab_size=1_0_0_0,
d_model=4,
encoder_layers=1,
decoder_layers=1,
encoder_ffn_dim=4,
decoder_ffn_dim=4,
encoder_attention_heads=1,
decoder_attention_heads=1,
)
_lowerCamelCase : int = FSMTForConditionalGeneration(config)
print(f"""num of params {tiny_model.num_parameters()}""")
# Test
_lowerCamelCase : Union[str, Any] = tokenizer(["Making tiny model"], return_tensors="pt")
_lowerCamelCase : Optional[Any] = tiny_model(**batch)
print("test output:", len(outputs.logits[0]))
# Save
tiny_model.half() # makes it smaller
tiny_model.save_pretrained(mname_tiny)
tokenizer.save_pretrained(mname_tiny)
print(f"""Generated {mname_tiny}""")
# Upload
# transformers-cli upload tiny-wmt19-en-ru
| 370 |
from collections.abc import Callable
from math import pi, sqrt
from random import uniform
from statistics import mean
def a__ ( UpperCAmelCase : int ) -> Dict:
# A local function to see if a dot lands in the circle.
def is_in_circle(UpperCAmelCase : float , UpperCAmelCase : float ) -> bool:
UpperCAmelCase : Dict = sqrt((x**2) + (y**2) )
# Our circle has a radius of 1, so a distance
# greater than 1 would land outside the circle.
return distance_from_centre <= 1
# The proportion of guesses that landed in the circle
UpperCAmelCase : List[str] = mean(
int(is_in_circle(uniform(-1.0 , 1.0 ) , uniform(-1.0 , 1.0 ) ) )
for _ in range(UpperCAmelCase ) )
    # The ratio of the circle's area to the square's area is pi/4.
UpperCAmelCase : int = proportion * 4
print(f'''The estimated value of pi is {pi_estimate}''' )
print(f'''The numpy value of pi is {pi}''' )
print(f'''The total error is {abs(pi - pi_estimate )}''' )
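    # as with any plain Monte Carlo estimate, the error shrinks like
    # O(1 / sqrt(iterations)), so 100x more samples buys roughly one more digit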
def a__ ( UpperCAmelCase : int , UpperCAmelCase : Callable[[float], float] , UpperCAmelCase : float = 0.0 , UpperCAmelCase : float = 1.0 , ) -> float:
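    # relies on E[f(U)] * (b - a) equalling the integral of f over [a, b]
    # when U ~ Uniform(a, b), i.e. plain Monte Carlo integration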
return mean(
function_to_integrate(uniform(UpperCAmelCase , UpperCAmelCase ) ) for _ in range(UpperCAmelCase ) ) * (max_value - min_value)
def a__ ( UpperCAmelCase : int , UpperCAmelCase : float = 0.0 , UpperCAmelCase : float = 1.0 ) -> None:
def identity_function(UpperCAmelCase : float ) -> float:
return x
UpperCAmelCase : int = area_under_curve_estimator(
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase )
UpperCAmelCase : Tuple = (max_value * max_value - min_value * min_value) / 2
print('''******************''' )
print(f'''Estimating area under y=x where x varies from {min_value} to {max_value}''' )
print(f'''Estimated value is {estimated_value}''' )
print(f'''Expected value is {expected_value}''' )
print(f'''Total error is {abs(estimated_value - expected_value )}''' )
print('''******************''' )
def a__ ( UpperCAmelCase : int ) -> None:
def function_to_integrate(UpperCAmelCase : float ) -> float:
return sqrt(4.0 - x * x )
UpperCAmelCase : Optional[int] = area_under_curve_estimator(
UpperCAmelCase , UpperCAmelCase , 0.0 , 2.0 )
print('''******************''' )
print('''Estimating pi using area_under_curve_estimator''' )
print(f'''Estimated value is {estimated_value}''' )
print(f'''Expected value is {pi}''' )
print(f'''Total error is {abs(estimated_value - pi )}''' )
print('''******************''' )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 99 | 0 |
'''simple docstring'''
import math
import os
import re
import sys
import unittest
from pathlib import Path
from typing import Tuple
from unittest.mock import patch
from parameterized import parameterized
from transformers.testing_utils import (
CaptureStderr,
ExtendSysPath,
TestCasePlus,
execute_subprocess_async,
get_gpu_count,
get_torch_dist_unique_port,
require_apex,
require_bitsandbytes,
require_fairscale,
require_torch,
require_torch_gpu,
require_torch_multi_gpu,
require_torch_non_multi_gpu,
slow,
)
from transformers.trainer_callback import TrainerState
from transformers.trainer_utils import set_seed
lowercase : Dict = os.path.abspath(os.path.dirname(__file__))
with ExtendSysPath(f'''{bindir}/../../examples/pytorch/translation'''):
from run_translation import main # noqa
set_seed(42)
lowercase : Dict = 'sshleifer/student_marian_en_ro_6_1'
lowercase : Optional[int] = 'sshleifer/tiny-mbart'
@require_torch
class A ( __snake_case ):
def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE=False , SCREAMING_SNAKE_CASE=None , SCREAMING_SNAKE_CASE=True , SCREAMING_SNAKE_CASE=True , SCREAMING_SNAKE_CASE=True , SCREAMING_SNAKE_CASE=True , ) -> str:
"""simple docstring"""
A : str = self.run_trainer(
eval_steps=1 , max_len=12 , model_name=SCREAMING_SNAKE_CASE , num_train_epochs=1 , distributed=SCREAMING_SNAKE_CASE , extra_args_str=SCREAMING_SNAKE_CASE , predict_with_generate=SCREAMING_SNAKE_CASE , do_train=SCREAMING_SNAKE_CASE , do_eval=SCREAMING_SNAKE_CASE , do_predict=SCREAMING_SNAKE_CASE , )
A : List[Any] = TrainerState.load_from_json(os.path.join(SCREAMING_SNAKE_CASE , '''trainer_state.json''' ) ).log_history
if not do_eval:
return
A : Dict = [log for log in logs if '''eval_loss''' in log.keys()]
A : List[str] = eval_metrics[0]
if predict_with_generate:
assert "eval_bleu" in first_step_stats
A : Tuple = eval_metrics[-1]
assert isinstance(last_step_stats['''eval_bleu'''] , SCREAMING_SNAKE_CASE )
assert not math.isnan(float(last_step_stats['''eval_loss'''] ) ), "eval_loss must not be `nan`"
@require_torch_non_multi_gpu
def __lowerCAmelCase ( self ) -> Union[str, Any]:
"""simple docstring"""
self.run_seqaseq_quick()
@require_torch_multi_gpu
def __lowerCAmelCase ( self ) -> List[str]:
"""simple docstring"""
self.run_seqaseq_quick(distributed=SCREAMING_SNAKE_CASE )
@require_torch_multi_gpu
def __lowerCAmelCase ( self ) -> List[Any]:
"""simple docstring"""
self.run_seqaseq_quick(distributed=SCREAMING_SNAKE_CASE )
@unittest.skip('''Requires an update of the env running those tests''' )
@require_torch_multi_gpu
@require_fairscale
def __lowerCAmelCase ( self ) -> Optional[Any]:
"""simple docstring"""
self.run_seqaseq_quick(distributed=SCREAMING_SNAKE_CASE , extra_args_str='''--sharded_ddp simple''' )
@unittest.skip('''Requires an update of the env running those tests''' )
@require_torch_multi_gpu
@require_fairscale
def __lowerCAmelCase ( self ) -> Union[str, Any]:
"""simple docstring"""
self.run_seqaseq_quick(distributed=SCREAMING_SNAKE_CASE , extra_args_str='''--sharded_ddp simple --fp16''' )
@unittest.skip('''Requires an update of the env running those tests''' )
@require_torch_multi_gpu
@require_fairscale
def __lowerCAmelCase ( self ) -> int:
"""simple docstring"""
self.run_seqaseq_quick(distributed=SCREAMING_SNAKE_CASE , extra_args_str='''--sharded_ddp zero_dp_2''' , predict_with_generate=SCREAMING_SNAKE_CASE )
@unittest.skip('''Requires an update of the env running those tests''' )
@require_torch_multi_gpu
@require_fairscale
def __lowerCAmelCase ( self ) -> Union[str, Any]:
"""simple docstring"""
self.run_seqaseq_quick(
distributed=SCREAMING_SNAKE_CASE , extra_args_str='''--sharded_ddp zero_dp_2 --fp16''' , predict_with_generate=SCREAMING_SNAKE_CASE )
@require_apex
@require_torch_gpu
def __lowerCAmelCase ( self ) -> int:
"""simple docstring"""
self.run_seqaseq_quick(distributed=SCREAMING_SNAKE_CASE , extra_args_str='''--fp16 --fp16_backend=apex''' )
        # test a 2nd time - this previously produced eval_loss: nan
# to reproduce the problem set distributed=False
self.run_seqaseq_quick(distributed=SCREAMING_SNAKE_CASE , extra_args_str='''--fp16 --fp16_backend=apex''' )
@parameterized.expand(['''base''', '''low''', '''high''', '''mixed'''] )
@require_torch_multi_gpu
def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE ) -> str:
"""simple docstring"""
A : Any = {
# test with the default log_level - should be info and thus log info once
'''base''': {'''extra_args_str''': '''''', '''n_matches''': 1},
# test with low log_level and log_level_replica - should be noisy on all processes
# now the info string should appear twice on 2 processes
'''low''': {'''extra_args_str''': '''--log_level debug --log_level_replica debug''', '''n_matches''': 2},
# test with high log_level and low log_level_replica
# now the info string should appear once only on the replica
'''high''': {'''extra_args_str''': '''--log_level error --log_level_replica debug''', '''n_matches''': 1},
# test with high log_level and log_level_replica - should be quiet on all processes
'''mixed''': {'''extra_args_str''': '''--log_level error --log_level_replica error''', '''n_matches''': 0},
}
A : Any = experiments[experiment_id]
A : Any = {'''distributed''': True, '''predict_with_generate''': False, '''do_eval''': False, '''do_predict''': False}
A : Union[str, Any] = '''Running training'''
with CaptureStderr() as cl:
self.run_seqaseq_quick(**SCREAMING_SNAKE_CASE , extra_args_str=data['''extra_args_str'''] )
A : int = len(re.findall(SCREAMING_SNAKE_CASE , cl.err ) )
self.assertEqual(SCREAMING_SNAKE_CASE , data['''n_matches'''] )
@slow
def __lowerCAmelCase ( self ) -> Optional[Any]:
"""simple docstring"""
        output_dir = self.run_trainer(
            eval_steps=2 , max_len=128 , model_name=SCREAMING_SNAKE_CASE , learning_rate=3e-4 , num_train_epochs=10 , distributed=SCREAMING_SNAKE_CASE , )
        # Check metrics
        logs = TrainerState.load_from_json(os.path.join(output_dir , '''trainer_state.json''' ) ).log_history
        eval_metrics = [log for log in logs if '''eval_loss''' in log.keys()]
        first_step_stats = eval_metrics[0]
        last_step_stats = eval_metrics[-1]
        assert first_step_stats["eval_loss"] > last_step_stats["eval_loss"], "model learned nothing"
        assert isinstance(last_step_stats['''eval_bleu'''] , float )
        # test if do_predict saves generations and metrics
        contents = os.listdir(output_dir )
        contents = {os.path.basename(p ) for p in contents}
assert "generated_predictions.txt" in contents
assert "predict_results.json" in contents
@slow
@require_bitsandbytes
def __lowerCAmelCase ( self ) -> Union[str, Any]:
"""simple docstring"""
from transformers.training_args import OptimizerNames
        def train_and_return_metrics(optim ) -> Tuple[int, float]:
            extra_args = '''--skip_memory_metrics 0'''
            output_dir = self.run_trainer(
                max_len=128 , model_name=SCREAMING_SNAKE_CASE , learning_rate=3e-4 , num_train_epochs=1 , optim=optim , distributed=SCREAMING_SNAKE_CASE , extra_args_str=extra_args , do_eval=SCREAMING_SNAKE_CASE , do_predict=SCREAMING_SNAKE_CASE , n_gpus_to_use=1 , )
            # Check metrics
            logs = TrainerState.load_from_json(Path(output_dir , '''trainer_state.json''' ) ).log_history
            gpu_peak_mem_mb = int(logs[0]['''train_mem_gpu_peaked_delta'''] / 2**20 )
            gpu_alloc_mem_mb = int(logs[0]['''train_mem_gpu_alloc_delta'''] / 2**20 )
            loss = logs[0]['''train_loss''']
            return gpu_peak_mem_mb, gpu_alloc_mem_mb, loss
        gpu_peak_mem_orig, gpu_alloc_mem_orig, loss_orig = train_and_return_metrics(OptimizerNames.ADAMW_TORCH.value )
        gpu_peak_mem_bnb, gpu_alloc_mem_bnb, loss_bnb = train_and_return_metrics(OptimizerNames.ADAMW_BNB.value )
        gpu_alloc_mem_diff = gpu_alloc_mem_orig - gpu_alloc_mem_bnb
        gpu_total_mem_orig = gpu_peak_mem_orig + gpu_alloc_mem_orig
        gpu_total_mem_bnb = gpu_peak_mem_bnb + gpu_alloc_mem_bnb
        gpu_total_mem_diff = gpu_total_mem_orig - gpu_total_mem_bnb
        # sshleifer/student_marian_en_ro_6_1 has 54M parameters, 29M of which are `nn.Embedding` weights that
        # don't get quantized and remain in fp32. Therefore we only have 25M parameters quantized
# in 2 bytes and the diff in optim memory usage is derived as so:
#
# - normal 25*8=~200MB (8 bytes per param)
# - bnb 25*2= ~50MB (2 bytes per param)
#
# Thus we should expect ~150MB total memory saved.
#
# Peak memory should be the same - the total should be different by about that same margin
#
        # After leaving a small margin to accommodate differences between GPUs, let's check
        # that we have at least 120MB in savings
        margin_mb = 120
# uncomment the following if this test starts failing - requires py38 for a new print feature
# gpu_peak_mem_diff = gpu_peak_mem_orig - gpu_peak_mem_bnb
# print(f"{gpu_alloc_mem_orig=}MB {gpu_peak_mem_orig=}MB {gpu_alloc_mem_orig+gpu_peak_mem_orig=}MB")
# print(f" {gpu_alloc_mem_bnb=}MB {gpu_peak_mem_bnb=}MB {gpu_alloc_mem_bnb+gpu_peak_mem_bnb=}MB")
# print(f"{gpu_alloc_mem_diff=}MB")
# print(f"{gpu_peak_mem_diff=}MB")
# print(f"{gpu_total_mem_orig=}MB, {gpu_total_mem_bnb=}MB")
# print(f"{gpu_total_mem_diff=}MB, {gpu_total_mem_diff=}MB")
        self.assertGreater(
            gpu_alloc_mem_diff , margin_mb , '''should use ~150MB less alloc gpu memory with BNB, compared to without it for this model but got'''
            F' a difference of {gpu_alloc_mem_diff}MB, with gpu_alloc_mem_orig={gpu_alloc_mem_orig}MB and'
            F' gpu_alloc_mem_bnb={gpu_alloc_mem_bnb}MB' , )
        self.assertGreater(
            gpu_total_mem_diff , margin_mb , '''should use ~150MB less total gpu memory with BNB, compared to without it for this model but got'''
            F' a difference of {gpu_total_mem_diff}MB, with gpu_total_mem_orig={gpu_total_mem_orig}MB and'
            F' gpu_total_mem_bnb={gpu_total_mem_bnb}MB' , )
        self.assertEqual(
            loss_orig , loss_bnb , F'loss should be the same, but got loss_orig={loss_orig}, loss_bnb={loss_bnb}' )
    def run_trainer( self , max_len , model_name , num_train_epochs , learning_rate = 3e-3 , optim = "adafactor" , distributed = False , extra_args_str = None , eval_steps = 0 , predict_with_generate = True , do_train = True , do_eval = True , do_predict = True , n_gpus_to_use = None , ) -> Tuple:
        """simple docstring"""
        data_dir = self.test_file_dir / '''../fixtures/tests_samples/wmt_en_ro'''
        output_dir = self.get_auto_remove_tmp_dir()
        args_train = F'\n --model_name_or_path {model_name}\n --train_file {data_dir}/train.json\n --validation_file {data_dir}/val.json\n --test_file {data_dir}/test.json\n --output_dir {output_dir}\n --overwrite_output_dir\n --max_train_samples 8\n --max_source_length {max_len}\n --max_target_length {max_len}\n --do_train\n --num_train_epochs {str(num_train_epochs )}\n --per_device_train_batch_size 4\n --learning_rate {learning_rate}\n --warmup_steps 8\n --logging_steps 0\n --logging_strategy no\n --save_steps {str(eval_steps )}\n --group_by_length\n --label_smoothing_factor 0.1\n --target_lang ro_RO\n --source_lang en_XX\n '.split()
        args_eval = F'\n --do_eval\n --per_device_eval_batch_size 4\n --max_eval_samples 8\n --val_max_target_length {max_len}\n --evaluation_strategy steps\n --eval_steps {str(eval_steps )}\n '.split()
        args_predict = '''
            --do_predict
        '''.split()
        args = []
        if do_train:
            args += args_train
        if do_eval:
            args += args_eval
        if do_predict:
            args += args_predict
        if predict_with_generate:
            args += "--predict_with_generate".split()
        if do_train:
            if optim == "adafactor":
                args += "--adafactor".split()
            else:
                args += F'--optim {optim}'.split()
        if extra_args_str is not None:
            args += extra_args_str.split()
        if distributed:
            if n_gpus_to_use is None:
                n_gpus_to_use = get_gpu_count()
            master_port = get_torch_dist_unique_port()
            distributed_args = F'\n -m torch.distributed.run\n --nproc_per_node={n_gpus_to_use}\n --master_port={master_port}\n {self.examples_dir_str}/pytorch/translation/run_translation.py\n '.split()
            cmd = [sys.executable] + distributed_args + args
            # keep for quick debug
            # print(" ".join([f"\nPYTHONPATH={self.src_dir_str}"] +cmd)); die
            execute_subprocess_async(cmd , env=self.get_env() )
        else:
            testargs = ['''run_translation.py'''] + args
            with patch.object(sys , '''argv''' , testargs ):
                main()
return output_dir
| 3 |
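# --- Added sketch (not part of the test above): the comment block in that test
# derives the expected optimizer-state savings from the model's parameter counts.
# The 54M/29M figures below are quoted from those comments, not measured here.
quantized_params_m = 54 - 29  # embeddings stay in fp32, so only ~25M params benefit
adamw_bytes_per_param = 8     # Adam keeps two fp32 state tensors per parameter
bnb_bytes_per_param = 2       # bnb's 8-bit optimizer keeps two int8 state tensors
orig_mb = quantized_params_m * adamw_bytes_per_param  # ~200MB
bnb_mb = quantized_params_m * bnb_bytes_per_param     # ~50MB
print(f"expected optimizer-state saving: ~{orig_mb - bnb_mb}MB")  # ~150MB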
"""simple docstring"""
import os
import re
import unicodedata
from shutil import copyfile
from typing import TYPE_CHECKING, Any, Dict, List, Optional, Tuple, Union
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import is_torch_available, logging
if is_torch_available():
import torch
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
SCREAMING_SNAKE_CASE__ = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE__ = {"vocab_file": "spiece.model"}
SCREAMING_SNAKE_CASE__ = {
"vocab_file": {
"AI-Sweden/gpt-sw3-126m": "https://huggingface.co/AI-Sweden/gpt-sw3-126m/resolve/main/spiece.model",
"AI-Sweden/gpt-sw3-350m": "https://huggingface.co/AI-Sweden/gpt-sw3-350m/resolve/main/spiece.model",
"AI-Sweden/gpt-sw3-1.6b": "https://huggingface.co/AI-Sweden/gpt-sw3-1.6b/resolve/main/spiece.model",
"AI-Sweden/gpt-sw3-6.7b": "https://huggingface.co/AI-Sweden/gpt-sw3-6.7b/resolve/main/spiece.model",
"AI-Sweden/gpt-sw3-20b": "https://huggingface.co/AI-Sweden/gpt-sw3-20b/resolve/main/spiece.model",
}
}
SCREAMING_SNAKE_CASE__ = {
"AI-Sweden/gpt-sw3-126m": 2_048,
"AI-Sweden/gpt-sw3-350m": 2_048,
"AI-Sweden/gpt-sw3-1.6b": 2_048,
"AI-Sweden/gpt-sw3-6.7b": 2_048,
"AI-Sweden/gpt-sw3-20b": 2_048,
}
class lowercase ( _UpperCAmelCase ):
_SCREAMING_SNAKE_CASE = VOCAB_FILES_NAMES
_SCREAMING_SNAKE_CASE = PRETRAINED_VOCAB_FILES_MAP
_SCREAMING_SNAKE_CASE = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_SCREAMING_SNAKE_CASE = ['input_ids', 'attention_mask']
    def __init__( self , vocab_file , do_lower_case=False , remove_space=False , keep_accents=False , pad_token=None , unk_token=None , eos_token=None , bos_token=None , sp_model_kwargs = None , **kwargs , ) -> None:
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
        name_or_path = kwargs.get("""name_or_path""" )
        if name_or_path is None:
            logger.warning(
                """name_or_path not provided, will work for all GPTSw3 models except gpt-sw3-7b,"""
                """ if you are testing the model, this can safely be ignored""" )
            name_or_path = """None"""
        # Default definitions for our 2 tokenizer versions, with None-checks to enable proper testing
        eos_token = """<|endoftext|>""" if eos_token is None else eos_token
        unk_token = """<unk>""" if unk_token is None else unk_token
        if "gpt-sw3-7b" in name_or_path:
            pad_token = unk_token if pad_token is None else pad_token
            bos_token = eos_token if bos_token is None else bos_token
        else:
            pad_token = """<pad>""" if pad_token is None else pad_token
            bos_token = """<s>""" if bos_token is None else bos_token
        super().__init__(
            do_lower_case=do_lower_case , remove_space=remove_space , keep_accents=keep_accents , bos_token=bos_token , eos_token=eos_token , unk_token=unk_token , pad_token=pad_token , sp_model_kwargs=self.sp_model_kwargs , **kwargs , )
        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.keep_accents = keep_accents
        self.vocab_file = vocab_file
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs )
        self.sp_model.Load(vocab_file )
        # Used for whitespace normalization in input texts
        # fmt: off
        self.whitespaces = {""" """, """ """, """ """, """ """, """ """, """ """, """ """, """ """, """ """, """ """, """""", """"""}
        # fmt: on
        # Regular expression to remove non-printing characters (e.g. some unicode control chars) in preprocessing
        self.non_printing_characters_re = re.compile(
            f'[{"".join(map(chr , list(range(0 , 9 ) ) + list(range(11 , 32 ) ) + list(range(127 , 160 ) ) + [160, 173, 8_203] ) )}]' )
    def __getstate__( self ) -> Dict:
        state = self.__dict__.copy()
        state["""sp_model"""] = None
        return state
    def __setstate__( self , d ) -> None:
        self.__dict__ = d
        # for backward compatibility
        if not hasattr(self , """sp_model_kwargs""" ):
            self.sp_model_kwargs = {}
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs )
        self.sp_model.Load(self.vocab_file )
@property
# Copied from transformers.models.albert.tokenization_albert.AlbertTokenizer.vocab_size
    def vocab_size( self ) -> int:
        return len(self.sp_model )
    def preprocess_text( self , text ) -> str:
        text = self.non_printing_characters_re.sub("""""" , text )
        # Normalize whitespaces
        text = """""".join([char if char not in self.whitespaces else """ """ for char in text] )
        # NFC Unicode normalization
        text = unicodedata.normalize("""NFC""" , text )
        return text
    def _tokenize( self , text , **kwargs ) -> List[str]:
        text = self.preprocess_text(text )
        return self.sp_model.encode(text , out_type=str )
    def _convert_token_to_id( self , token ) -> int:
        return self.sp_model.PieceToId(token )
    def _convert_id_to_token( self , index ) -> str:
        return self.sp_model.IdToPiece(index )
@staticmethod
    def clean_up_tokenization( out_string ) -> str:
        return out_string
    def convert_tokens_to_string( self , tokens ) -> str:
        current_sub_tokens = []
        out_string = """"""
        prev_is_special = False
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                # TODO: Check if this is needed, as it ensures that decode(encode(doc)) != doc by adding extra whitespace in the decoded document
                if not prev_is_special:
                    out_string += " "
                out_string += self.sp_model.decode(current_sub_tokens ) + token
                prev_is_special = True
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token )
                prev_is_special = False
        out_string += self.sp_model.decode(current_sub_tokens )
        return out_string
    def get_vocab( self ) -> Dict[str, int]:
        vocab = {self.convert_ids_to_tokens(i ): i for i in range(self.vocab_size )}
        vocab.update(self.added_tokens_encoder )
        return vocab
    def save_vocabulary( self , save_directory , filename_prefix = None ) -> Tuple[str]:
        if not os.path.isdir(save_directory ):
            logger.error(f'Vocabulary path ({save_directory}) should be a directory' )
            return
        out_vocab_file = os.path.join(
            save_directory , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
        if os.path.abspath(self.vocab_file ) != os.path.abspath(out_vocab_file ) and os.path.isfile(self.vocab_file ):
            copyfile(self.vocab_file , out_vocab_file )
        elif not os.path.isfile(self.vocab_file ):
            with open(out_vocab_file , """wb""" ) as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model )
        return (out_vocab_file,)
    def encode_fast( self , text , return_tensors = False ) -> Union[List[int], List[List[int]], "torch.Tensor"]:
        if isinstance(text , str ):
            text = self.preprocess_text(text )
            token_ids = self.sp_model.encode(text )
        else:
            text = [self.preprocess_text(t ) for t in text]
            token_ids = self.sp_model.encode(text )
        if return_tensors is True or return_tensors == "pt":
            token_ids = torch.tensor(token_ids )
        return token_ids
    def decode_fast( self , token_ids ) -> str:
        return self.sp_model.decode(token_ids )
    def build_conversation_input_ids( self , conversation ) -> List[int]:
        all_responses = [f'User: {text}' if is_user else f'Bot: {text}' for is_user, text in conversation.iter_texts()]
        prompt = (
            f'{self.eos_token}{self.bos_token}' + f'{self.bos_token}'.join(all_responses ) + f'{self.bos_token}Bot:'
        )
        return self.encode(text=prompt )
| 46 | 0 |
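# --- Added sketch: the tokenizer's preprocess_text above chains three steps --
# strip non-printing characters, map exotic whitespace to plain spaces, apply NFC.
# The character ranges and whitespace set here are simplified stand-ins, not the
# tokenizer's exact tables.
import re
import unicodedata

non_printing_re = re.compile(
    "[" + "".join(map(chr, list(range(0, 9)) + list(range(11, 32)) + list(range(127, 160)))) + "]"
)
exotic_whitespace = {"\u00a0", "\u2009", "\u200a", "\u3000"}  # assumed examples

def preprocess(text: str) -> str:
    text = non_printing_re.sub("", text)  # drop control characters
    text = "".join(" " if ch in exotic_whitespace else ch for ch in text)
    return unicodedata.normalize("NFC", text)  # canonical composition

print(preprocess("hello\u00a0world\x07"))  # -> 'hello world'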
# Logistic Regression from scratch
# importing all the required libraries
import numpy as np
from matplotlib import pyplot as plt
from sklearn import datasets
def sigmoid_function( z ):
    return 1 / (1 + np.exp(-z ))
def cost_function( h , y ):
    return (-y * np.log(h ) - (1 - y) * np.log(1 - h )).mean()
def log_likelihood( x , y , weights ):
    scores = np.dot(x , weights )
    return np.sum(y * scores - np.log(1 + np.exp(scores ) ) )
def logistic_reg( alpha , x , y , max_iterations=70000 ):
    theta = np.zeros(x.shape[1] )
    for iterations in range(max_iterations ):
        z = np.dot(x , theta )
        h = sigmoid_function(z )
        gradient = np.dot(x.T , h - y ) / y.size
        theta = theta - alpha * gradient # updating the weights
        z = np.dot(x , theta )
        h = sigmoid_function(z )
        j = cost_function(h , y )
        if iterations % 100 == 0:
            print(F"""loss: {j} \t""" ) # printing the loss after every 100 iterations
    return theta
if __name__ == "__main__":
    iris = datasets.load_iris()
    x = iris.data[:, :2]
    y = (iris.target != 0) * 1
    alpha = 0.1
    theta = logistic_reg(alpha, x, y, max_iterations=70_000)
    print('''theta: ''', theta) # printing the theta i.e our weights vector
    def predict_prob( x ):
        return sigmoid_function(
            np.dot(x , theta ) ) # predicting the value of probability from the logistic regression algorithm
    plt.figure(figsize=(10, 6))
    plt.scatter(x[y == 0][:, 0], x[y == 0][:, 1], color='''b''', label='''0''')
    plt.scatter(x[y == 1][:, 0], x[y == 1][:, 1], color='''r''', label='''1''')
    (x1_min, x1_max) = (x[:, 0].min(), x[:, 0].max())
    (x2_min, x2_max) = (x[:, 1].min(), x[:, 1].max())
    (xx1, xx2) = np.meshgrid(np.linspace(x1_min, x1_max), np.linspace(x2_min, x2_max))
    grid = np.c_[xx1.ravel(), xx2.ravel()]
    probs = predict_prob(grid).reshape(xx1.shape)
    plt.contour(xx1, xx2, probs, [0.5], linewidths=1, colors='''black''')
    plt.legend()
    plt.show()
| 103 |
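# --- Added cross-check (assumes scikit-learn, which the snippet above already
# imports from): iris class 0 is linearly separable on the first two features,
# so a library logistic regression should reach perfect training accuracy there,
# matching what the handwritten gradient-descent version converges toward.
from sklearn.datasets import load_iris
from sklearn.linear_model import LogisticRegression

iris_check = load_iris()
x_check = iris_check.data[:, :2]        # same two features as above
y_check = (iris_check.target != 0) * 1  # same binarized labels as above
clf = LogisticRegression().fit(x_check, y_check)
print(clf.score(x_check, y_check))      # expected: 1.0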
import argparse
import torch
from safetensors.torch import load_file
from diffusers import StableDiffusionPipeline
def convert( base_model_path , checkpoint_path , lora_prefix_unet , lora_prefix_text_encoder , alpha ):
    # load base model
    pipeline = StableDiffusionPipeline.from_pretrained(base_model_path , torch_dtype=torch.float32 )
    # load LoRA weight from .safetensors
    state_dict = load_file(checkpoint_path )
    visited = []
    # directly update weight in diffusers model
    for key in state_dict:
        # it is suggested to print out the key, it usually will be something like below
        # "lora_te_text_model_encoder_layers_0_self_attn_k_proj.lora_down.weight"
        # as we have set the alpha beforehand, so just skip
        if ".alpha" in key or key in visited:
            continue
        if "text" in key:
            layer_infos = key.split('.' )[0].split(lora_prefix_text_encoder + '_' )[-1].split('_' )
            curr_layer = pipeline.text_encoder
        else:
            layer_infos = key.split('.' )[0].split(lora_prefix_unet + '_' )[-1].split('_' )
            curr_layer = pipeline.unet
        # find the target layer
        temp_name = layer_infos.pop(0 )
        while len(layer_infos ) > -1:
            try:
                curr_layer = curr_layer.__getattr__(temp_name )
                if len(layer_infos ) > 0:
                    temp_name = layer_infos.pop(0 )
                elif len(layer_infos ) == 0:
                    break
            except Exception:
                if len(temp_name ) > 0:
                    temp_name += "_" + layer_infos.pop(0 )
                else:
                    temp_name = layer_infos.pop(0 )
        pair_keys = []
        if "lora_down" in key:
            pair_keys.append(key.replace('lora_down' , 'lora_up' ) )
            pair_keys.append(key )
        else:
            pair_keys.append(key )
            pair_keys.append(key.replace('lora_up' , 'lora_down' ) )
        # update weight
        if len(state_dict[pair_keys[0]].shape ) == 4:
            weight_up = state_dict[pair_keys[0]].squeeze(3 ).squeeze(2 ).to(torch.float32 )
            weight_down = state_dict[pair_keys[1]].squeeze(3 ).squeeze(2 ).to(torch.float32 )
            curr_layer.weight.data += alpha * torch.mm(weight_up , weight_down ).unsqueeze(2 ).unsqueeze(3 )
        else:
            weight_up = state_dict[pair_keys[0]].to(torch.float32 )
            weight_down = state_dict[pair_keys[1]].to(torch.float32 )
            curr_layer.weight.data += alpha * torch.mm(weight_up , weight_down )
        # update visited list
        for item in pair_keys:
            visited.append(item )
    return pipeline
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
'''--base_model_path''', default=None, type=str, required=True, help='''Path to the base model in diffusers format.'''
)
parser.add_argument(
'''--checkpoint_path''', default=None, type=str, required=True, help='''Path to the checkpoint to convert.'''
)
parser.add_argument('''--dump_path''', default=None, type=str, required=True, help='''Path to the output model.''')
parser.add_argument(
'''--lora_prefix_unet''', default='''lora_unet''', type=str, help='''The prefix of UNet weight in safetensors'''
)
parser.add_argument(
'''--lora_prefix_text_encoder''',
default='''lora_te''',
type=str,
help='''The prefix of text encoder weight in safetensors''',
)
parser.add_argument('''--alpha''', default=0.75, type=float, help='''The merging ratio in W = W0 + alpha * deltaW''')
parser.add_argument(
'''--to_safetensors''', action='''store_true''', help='''Whether to store pipeline in safetensors format or not.'''
)
parser.add_argument('''--device''', type=str, help='''Device to use (e.g. cpu, cuda:0, cuda:1, etc.)''')
    args = parser.parse_args()
    base_model_path = args.base_model_path
    checkpoint_path = args.checkpoint_path
    dump_path = args.dump_path
    lora_prefix_unet = args.lora_prefix_unet
    lora_prefix_text_encoder = args.lora_prefix_text_encoder
    alpha = args.alpha
    pipe = convert(base_model_path, checkpoint_path, lora_prefix_unet, lora_prefix_text_encoder, alpha)
    pipe = pipe.to(args.device)
pipe.save_pretrained(args.dump_path, safe_serialization=args.to_safetensors)
| 103 | 1 |
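# --- Added toy check of the merge rule used above, W = W0 + alpha * (up @ down),
# on a single weight matrix (shapes chosen only for illustration).
import torch

torch.manual_seed(0)
d_out, d_in, rank, alpha = 6, 4, 2, 0.75
w0 = torch.randn(d_out, d_in)        # base weight W0
lora_up = torch.randn(d_out, rank)   # stands in for a lora_up tensor
lora_down = torch.randn(rank, d_in)  # stands in for a lora_down tensor

merged = w0 + alpha * torch.mm(lora_up, lora_down)  # W = W0 + alpha * deltaW

# merging ahead of time matches applying the low-rank delta on the fly
x = torch.randn(3, d_in)
assert torch.allclose(
    x @ merged.T,
    x @ w0.T + alpha * (x @ lora_down.T) @ lora_up.T,
    atol=1e-5,
)
print(merged.shape)  # torch.Size([6, 4])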
"""simple docstring"""
def solution( limit : int = 5000_0000 ):
    '''simple docstring'''
    ret = set()
    prime_square_limit = int((limit - 24) ** (1 / 2) )
    primes = set(range(3 , prime_square_limit + 1 , 2 ) )
    primes.add(2 )
    for p in range(3 , prime_square_limit + 1 , 2 ):
        if p not in primes:
            continue
        primes.difference_update(set(range(p * p , prime_square_limit + 1 , p ) ) )
    for primea in sorted(primes ):  # ascending order keeps the early breaks valid
        square = primea * primea
        for primeb in sorted(primes ):
            cube = primeb * primeb * primeb
            if square + cube >= limit - 16:
                break
            for primec in sorted(primes ):
                tetr = primec * primec * primec * primec
                total = square + cube + tetr
                if total >= limit:
                    break
                ret.add(total )
    return len(ret )
if __name__ == "__main__":
print(f'''{solution() = }''')
| 260 |
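# --- Added worked example for the prime-power-triples counter above: below 50,
# the expressible numbers are 28 = 2**2 + 2**3 + 2**4, 33 = 3**2 + 2**3 + 2**4,
# 47 = 2**2 + 3**3 + 2**4 and 49 = 5**2 + 2**3 + 2**4.
print(solution(50))  # 4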
from __future__ import annotations
def median_of_two_arrays( numsa : list[float] , numsb : list[float] ) -> float:
    all_numbers = sorted(numsa + numsb )
    div , mod = divmod(len(all_numbers ) , 2 )
    if mod == 1:
        return all_numbers[div]
    else:
        return (all_numbers[div] + all_numbers[div - 1]) / 2
if __name__ == "__main__":
import doctest
doctest.testmod()
    array_a = [float(x) for x in input("Enter the elements of first array: ").split()]
    array_b = [float(x) for x in input("Enter the elements of second array: ").split()]
    print(F'The median of two arrays is: {median_of_two_arrays(array_a, array_b)}')
| 228 | 0 |
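# --- Added non-interactive check of median_of_two_arrays above.
print(median_of_two_arrays([1.0, 3.0], [2.0]))       # 2.0 (odd total length)
print(median_of_two_arrays([1.0, 2.0], [3.0, 4.0]))  # 2.5 (even total length)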
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import Features, Value
from .base import TaskTemplate
@dataclass(frozen=UpperCamelCase__ )
class __a( UpperCamelCase__ ):
"""simple docstring"""
    task: str = field(default='''summarization''' , metadata={'''include_in_asdict_even_if_is_default''': True} )
    input_schema: ClassVar[Features] = Features({'''text''': Value('''string''' )} )
    label_schema: ClassVar[Features] = Features({'''summary''': Value('''string''' )} )
    text_column: str = "text"
    summary_column: str = "summary"
@property
def a__ ( self ) -> Dict[str, str]:
return {self.text_column: "text", self.summary_column: "summary"} | 355 |
import re
import jax.numpy as jnp
from flax.traverse_util import flatten_dict, unflatten_dict
from jax.random import PRNGKey
from ..utils import logging
__a = logging.get_logger(__name__)
def rename_key( key ):
    '''simple docstring'''
    regex = r'''\w+[.]\d+'''
    pats = re.findall(regex , key )
    for pat in pats:
        key = key.replace(pat , '''_'''.join(pat.split('''.''' ) ) )
    return key
def rename_key_and_reshape_tensor( pt_tuple_key , pt_tensor , random_flax_state_dict ):
    '''simple docstring'''
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ('''scale''',)
    if (
        any('''norm''' in str_ for str_ in pt_tuple_key )
        and (pt_tuple_key[-1] == "bias")
        and (pt_tuple_key[:-1] + ("bias",) not in random_flax_state_dict)
        and (pt_tuple_key[:-1] + ("scale",) in random_flax_state_dict)
    ):
        renamed_pt_tuple_key = pt_tuple_key[:-1] + ('''scale''',)
        return renamed_pt_tuple_key, pt_tensor
    elif pt_tuple_key[-1] in ["weight", "gamma"] and pt_tuple_key[:-1] + ("scale",) in random_flax_state_dict:
        renamed_pt_tuple_key = pt_tuple_key[:-1] + ('''scale''',)
        return renamed_pt_tuple_key, pt_tensor
    # embedding
    if pt_tuple_key[-1] == "weight" and pt_tuple_key[:-1] + ("embedding",) in random_flax_state_dict:
        renamed_pt_tuple_key = pt_tuple_key[:-1] + ('''embedding''',)
        return renamed_pt_tuple_key, pt_tensor
    # conv layer
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ('''kernel''',)
    if pt_tuple_key[-1] == "weight" and pt_tensor.ndim == 4:
        pt_tensor = pt_tensor.transpose(2 , 3 , 1 , 0 )
        return renamed_pt_tuple_key, pt_tensor
    # linear layer
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ('''kernel''',)
    if pt_tuple_key[-1] == "weight":
        pt_tensor = pt_tensor.T
        return renamed_pt_tuple_key, pt_tensor
    # old PyTorch layer norm weight
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ('''weight''',)
    if pt_tuple_key[-1] == "gamma":
        return renamed_pt_tuple_key, pt_tensor
    # old PyTorch layer norm bias
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ('''bias''',)
    if pt_tuple_key[-1] == "beta":
        return renamed_pt_tuple_key, pt_tensor
    return pt_tuple_key, pt_tensor
def convert_pytorch_state_dict_to_flax( pt_state_dict , flax_model , init_key=42 ):
    '''simple docstring'''
    pt_state_dict = {k: v.numpy() for k, v in pt_state_dict.items()}
    # Step 2: Since the model is stateless, get random Flax params
    random_flax_params = flax_model.init_weights(PRNGKey(init_key ) )
    random_flax_state_dict = flatten_dict(random_flax_params )
    flax_state_dict = {}
    # Need to change some parameters name to match Flax names
    for pt_key, pt_tensor in pt_state_dict.items():
        renamed_pt_key = rename_key(pt_key )
        pt_tuple_key = tuple(renamed_pt_key.split('''.''' ) )
        # Correctly rename weight parameters
        flax_key, flax_tensor = rename_key_and_reshape_tensor(pt_tuple_key , pt_tensor , random_flax_state_dict )
        if flax_key in random_flax_state_dict:
            if flax_tensor.shape != random_flax_state_dict[flax_key].shape:
                raise ValueError(
                    f'''PyTorch checkpoint seems to be incorrect. Weight {pt_key} was expected to be of shape '''
                    f'''{random_flax_state_dict[flax_key].shape}, but is {flax_tensor.shape}.''' )
        # also add unexpected weight so that warning is thrown
        flax_state_dict[flax_key] = jnp.asarray(flax_tensor )
    return unflatten_dict(flax_state_dict ) | 235 | 0 |
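# --- Added shape sketch of the two reshapes above: PyTorch stores Linear weights
# as (out, in) and Conv2d weights as (out, in, kh, kw), while Flax expects
# (in, out) Dense kernels and (kh, kw, in, out) Conv kernels.
import numpy as np

linear_pt = np.zeros((8, 4))                # (out_features, in_features)
print(linear_pt.T.shape)                    # (4, 8) -> Flax (in, out)

conv_pt = np.zeros((16, 3, 5, 5))           # (out_ch, in_ch, kh, kw)
print(conv_pt.transpose(2, 3, 1, 0).shape)  # (5, 5, 3, 16) -> Flax (kh, kw, in, out)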
'''simple docstring'''
import numpy as np
import torch
from imwatermark import WatermarkEncoder
# Copied from https://github.com/Stability-AI/generative-models/blob/613af104c6b85184091d42d374fef420eddb356d/scripts/demo/streamlit_helpers.py#L66
UpperCamelCase__ = 0B1011_0011_1110_1100_1001_0000_0111_1011_1011_0001_1001_1110
# bin(x)[2:] gives bits of x as str, use int to convert them to 0/1
UpperCamelCase__ = [int(bit) for bit in bin(WATERMARK_MESSAGE)[2:]]
class lowerCamelCase_ :
def __init__( self : Dict ):
'''simple docstring'''
        self.watermark = WATERMARK_BITS
        self.encoder = WatermarkEncoder()
self.encoder.set_watermark('''bits''' , self.watermark )
    def apply_watermark( self , images ):
        '''simple docstring'''
        # can't encode images that are smaller than 256 pixels wide
        if images.shape[-1] < 256:
            return images
        images = (255 * (images / 2 + 0.5)).cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
        images = [self.encoder.encode(image , '''dwtDct''' ) for image in images]
        images = torch.from_numpy(np.array(images ) ).permute(0 , 3 , 1 , 2 )
        images = torch.clamp(2 * (images / 255 - 0.5) , min=-1.0 , max=1.0 )
        return images
| 181 |
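# --- Added check of the bit expansion at the top of the snippet above: bin()
# yields the binary digits as characters, which the comprehension maps to 0/1.
msg = 0b1011
print(bin(msg)[2:])                        # '1011'
print([int(bit) for bit in bin(msg)[2:]])  # [1, 0, 1, 1]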
"""simple docstring"""
def solution( n = 6008_5147_5143 ):
    '''simple docstring'''
    try:
        n = int(n )
    except (TypeError, ValueError):
        raise TypeError("Parameter n must be int or castable to int." )
    if n <= 0:
        raise ValueError("Parameter n must be greater than or equal to one." )
    i = 2
    ans = 0
    if n == 2:
        return 2
    while n > 2:
        while n % i != 0:
            i += 1
        ans = i
        while n % i == 0:
            n = n // i
        i += 1
    return int(ans )
if __name__ == "__main__":
print(f'''{solution() = }''')
| 57 | 0 |
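# --- Added worked examples for solution() above: 13195 = 5 * 7 * 13 * 29, and
# the default input 600851475143 factors as 71 * 839 * 1471 * 6857.
print(solution(13195))  # 29
print(solution())       # 6857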
"""simple docstring"""
from __future__ import annotations
def print_distance( distance , src ):
    print(F"Vertex\tShortest Distance from vertex {src}" )
    for i, d in enumerate(distance ):
        print(F"{i}\t\t{d}" )
def check_negative_cycle( graph , distance , edge_count ):
    for j in range(edge_count ):
        u , v , w = (graph[j][k] for k in ["src", "dst", "weight"])
        if distance[u] != float("inf" ) and distance[u] + w < distance[v]:
            return True
    return False
def bellman_ford( graph , vertex_count , edge_count , src ):
    distance = [float("inf" )] * vertex_count
    distance[src] = 0.0
    for _ in range(vertex_count - 1 ):
        for j in range(edge_count ):
            u , v , w = (graph[j][k] for k in ["src", "dst", "weight"])
            if distance[u] != float("inf" ) and distance[u] + w < distance[v]:
                distance[v] = distance[u] + w
    negative_cycle_exists = check_negative_cycle(graph , distance , edge_count )
    if negative_cycle_exists:
        raise Exception("Negative cycle found" )
    return distance
if __name__ == "__main__":
import doctest
doctest.testmod()
SCREAMING_SNAKE_CASE__:int = int(input("""Enter number of vertices: """).strip())
SCREAMING_SNAKE_CASE__:Optional[Any] = int(input("""Enter number of edges: """).strip())
SCREAMING_SNAKE_CASE__:list[dict[str, int]] = [{} for _ in range(E)]
for i in range(E):
print("""Edge """, i + 1)
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__:List[Any] = (
int(x)
for x in input("""Enter source, destination, weight: """).strip().split(""" """)
)
SCREAMING_SNAKE_CASE__:List[str] = {"""src""": src, """dst""": dest, """weight""": weight}
SCREAMING_SNAKE_CASE__:Tuple = int(input("""\nEnter shortest path source:""").strip())
SCREAMING_SNAKE_CASE__:str = bellman_ford(graph, V, E, source)
print_distance(shortest_distance, 0)
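# --- Added non-interactive sketch of the same call, on a toy 3-vertex graph
# with edges 0->1 (weight 4), 0->2 (weight 1) and 2->1 (weight 2).
example_graph = [
    {"src": 0, "dst": 1, "weight": 4},
    {"src": 0, "dst": 2, "weight": 1},
    {"src": 2, "dst": 1, "weight": 2},
]
print(bellman_ford(example_graph, 3, 3, 0))  # [0.0, 3.0, 1.0]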
| 268 | """simple docstring"""
import os
import random
import sys
from . import cryptomath_module as cryptomath
from . import rabin_miller
SCREAMING_SNAKE_CASE__:List[str] = 3
def primitive_root( p_val ):
    print("Generating primitive root of p" )
    while True:
        g = random.randrange(3 , p_val )
        if pow(g , 2 , p_val ) == 1:
            continue
        if pow(g , p_val , p_val ) == 1:
            continue
        return g
def generate_key( key_size ):
    print("Generating prime p..." )
    p = rabin_miller.generate_large_prime(key_size )  # select large prime number.
    e_1 = primitive_root(p )  # one primitive root on modulo p.
    d = random.randrange(3 , p )  # private_key -> have to be greater than 2 for safety.
    e_2 = cryptomath.find_mod_inverse(pow(e_1 , d , p ) , p )
    public_key = (key_size, e_1, e_2, p)
    private_key = (key_size, d)
    return public_key, private_key
def make_key_files( name , key_size ):
    if os.path.exists(F"{name}_pubkey.txt" ) or os.path.exists(F"{name}_privkey.txt" ):
        print("\nWARNING:" )
        print(
            F"\"{name}_pubkey.txt\" or \"{name}_privkey.txt\" already exists. \n"
            "Use a different name or delete these files and re-run this program." )
        sys.exit()
    public_key , private_key = generate_key(key_size )
    print(F"\nWriting public key to file {name}_pubkey.txt..." )
    with open(F"{name}_pubkey.txt" , "w" ) as fo:
        fo.write(F"{public_key[0]},{public_key[1]},{public_key[2]},{public_key[3]}" )
    print(F"Writing private key to file {name}_privkey.txt..." )
    with open(F"{name}_privkey.txt" , "w" ) as fo:
        fo.write(F"{private_key[0]},{private_key[1]}" )
def main():
    print("Making key files..." )
    make_key_files("elgamal" , 2_0_4_8 )
    print("Key files generation successful" )
if __name__ == "__main__":
main()
| 268 | 1 |
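# --- Added toy walk-through of the key relations above, with tiny numbers in
# place of rabin_miller/cryptomath (p=23, g=5 are illustrative, not secure).
p, g, d = 23, 5, 7             # toy prime, primitive root, private exponent
e_1 = pow(g, d, p)             # 5**7 % 23 == 17
e_2 = pow(e_1, -1, p)          # modular inverse, what find_mod_inverse computes
print(e_1, e_2, (e_1 * e_2) % p)  # 17 19 1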
from __future__ import annotations
def get_valid_pos( position , n )-> list[tuple[int, int]]:
    """simple docstring"""
    y , x = position
    positions = [
        (y + 1, x + 2),
        (y - 1, x + 2),
        (y + 1, x - 2),
        (y - 1, x - 2),
        (y + 2, x + 1),
        (y + 2, x - 1),
        (y - 2, x + 1),
        (y - 2, x - 1),
    ]
    permissible_positions = []
    for position in positions:
        y_test , x_test = position
        if 0 <= y_test < n and 0 <= x_test < n:
            permissible_positions.append(position )
    return permissible_positions
def is_complete( board )-> bool:
    """simple docstring"""
    return not any(elem == 0 for row in board for elem in row )
def open_knight_tour_helper( board , pos , curr )-> bool:
    """simple docstring"""
    if is_complete(board ):
        return True
    for position in get_valid_pos(pos , len(board ) ):
        y , x = position
        if board[y][x] == 0:
            board[y][x] = curr + 1
            if open_knight_tour_helper(board , position , curr + 1 ):
                return True
            board[y][x] = 0
    return False
def open_knight_tour( n )-> list[list[int]]:
    """simple docstring"""
    board = [[0 for i in range(n )] for j in range(n )]
    for i in range(n ):
        for j in range(n ):
            board[i][j] = 1
            if open_knight_tour_helper(board , (i, j) , 1 ):
                return board
            board[i][j] = 0
    msg = F"""Open Knight Tour cannot be performed on a board of size {n}"""
    raise ValueError(msg )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 39 |
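# --- Added quick checks of open_knight_tour above: a 1x1 board is trivially
# solved, and n=5 is the smallest size above 1 that admits an open tour
# (sizes 2-4 raise ValueError). The 5x5 search backtracks, so it can take a moment.
print(open_knight_tour(1))  # [[1]]
board = open_knight_tour(5)
print(sorted(v for row in board for v in row) == list(range(1, 26)))  # True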
"""simple docstring"""
import os
import unicodedata
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import SPIECE_UNDERLINE, logging
_snake_case = logging.get_logger(__name__)
_snake_case = {'vocab_file': 'spiece.model'}
_snake_case = {
'vocab_file': {
'TsinghuaAI/CPM-Generate': 'https://huggingface.co/TsinghuaAI/CPM-Generate/resolve/main/spiece.model',
}
}
class UpperCamelCase ( snake_case_ ):
def __init__( self : Tuple , UpperCAmelCase__ : Optional[Any] , UpperCAmelCase__ : int=False , UpperCAmelCase__ : Dict=True , UpperCAmelCase__ : Union[str, Any]=False , UpperCAmelCase__ : Dict="<s>" , UpperCAmelCase__ : Any="</s>" , UpperCAmelCase__ : Any="<unk>" , UpperCAmelCase__ : int="<sep>" , UpperCAmelCase__ : Tuple="<pad>" , UpperCAmelCase__ : Any="<cls>" , UpperCAmelCase__ : Optional[Any]="<mask>" , UpperCAmelCase__ : int=["<eop>", "<eod>"] , UpperCAmelCase__ : Optional[Dict[str, Any]] = None , **UpperCAmelCase__ : List[str] , ) -> None:
_a : Optional[int] = AddedToken(UpperCAmelCase__ , lstrip=UpperCAmelCase__ , rstrip=UpperCAmelCase__ ) if isinstance(UpperCAmelCase__ , UpperCAmelCase__ ) else mask_token
_a : Union[str, Any] = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
do_lower_case=UpperCAmelCase__ , remove_space=UpperCAmelCase__ , keep_accents=UpperCAmelCase__ , bos_token=UpperCAmelCase__ , eos_token=UpperCAmelCase__ , unk_token=UpperCAmelCase__ , sep_token=UpperCAmelCase__ , pad_token=UpperCAmelCase__ , cls_token=UpperCAmelCase__ , mask_token=UpperCAmelCase__ , additional_special_tokens=UpperCAmelCase__ , sp_model_kwargs=self.sp_model_kwargs , **UpperCAmelCase__ , )
_a : Optional[Any] = 3
_a : Tuple = do_lower_case
_a : Tuple = remove_space
_a : Tuple = keep_accents
_a : Tuple = vocab_file
_a : Any = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(UpperCAmelCase__ )
try:
import jieba
except ModuleNotFoundError as error:
raise error.__class__(
"""You need to install jieba to use CpmTokenizer or CpmTokenizerFast. """
"""See https://pypi.org/project/jieba/ for installation.""" )
_a : int = jieba
_a : Tuple = str.maketrans(""" \n""" , """\u2582\u2583""" )
@property
# Copied from transformers.models.xlnet.tokenization_xlnet.XLNetTokenizer.vocab_size
def _lowercase ( self : Optional[Any] ) -> Any:
return len(self.sp_model )
def _lowercase ( self : str ) -> Union[str, Any]:
_a : int = {self.convert_ids_to_tokens(UpperCAmelCase__ ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def __getstate__( self : Tuple ) -> List[str]:
_a : Tuple = self.__dict__.copy()
_a : Tuple = None
return state
def __setstate__( self : Any , UpperCAmelCase__ : Dict ) -> Dict:
_a : Tuple = d
# for backward compatibility
if not hasattr(self , """sp_model_kwargs""" ):
_a : Tuple = {}
_a : int = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
    def preprocess_text( self , inputs : str ) -> str:
        if self.remove_space:
            outputs = """ """.join(inputs.strip().split() )
        else:
            outputs = inputs
        outputs = outputs.replace("""``""" , """\"""" ).replace("""''""" , """\"""" )
        if not self.keep_accents:
            outputs = unicodedata.normalize("""NFKD""" , outputs )
            outputs = """""".join([c for c in outputs if not unicodedata.combining(c )] )
        if self.do_lower_case:
            outputs = outputs.lower()
        return outputs
def _lowercase ( self : List[str] , UpperCAmelCase__ : str ) -> List[str]:
_a : str = self.preprocess_text(UpperCAmelCase__ )
_a : Dict = self.sp_model.encode(UpperCAmelCase__ , out_type=UpperCAmelCase__ )
_a : Union[str, Any] = []
for piece in pieces:
if len(UpperCAmelCase__ ) > 1 and piece[-1] == str(""",""" ) and piece[-2].isdigit():
_a : Dict = self.sp_model.EncodeAsPieces(piece[:-1].replace(UpperCAmelCase__ , """""" ) )
if piece[0] != SPIECE_UNDERLINE and cur_pieces[0][0] == SPIECE_UNDERLINE:
if len(cur_pieces[0] ) == 1:
_a : Dict = cur_pieces[1:]
else:
_a : Any = cur_pieces[0][1:]
cur_pieces.append(piece[-1] )
new_pieces.extend(UpperCAmelCase__ )
else:
new_pieces.append(UpperCAmelCase__ )
return new_pieces
def _lowercase ( self : Union[str, Any] , UpperCAmelCase__ : int ) -> int:
return self.sp_model.PieceToId(UpperCAmelCase__ )
def _lowercase ( self : List[Any] , UpperCAmelCase__ : Optional[Any] ) -> Any:
return self.sp_model.IdToPiece(UpperCAmelCase__ )
def _lowercase ( self : Any , UpperCAmelCase__ : Any ) -> Dict:
_a : Dict = """""".join(UpperCAmelCase__ ).replace(UpperCAmelCase__ , """ """ ).strip()
return out_string
def _lowercase ( self : Any , UpperCAmelCase__ : List[int] , UpperCAmelCase__ : Optional[List[int]] = None ) -> List[int]:
_a : Optional[Any] = [self.sep_token_id]
_a : Dict = [self.cls_token_id]
if token_ids_a is None:
return token_ids_a + sep + cls
return token_ids_a + sep + token_ids_a + sep + cls
def _lowercase ( self : Tuple , UpperCAmelCase__ : List[int] , UpperCAmelCase__ : Optional[List[int]] = None , UpperCAmelCase__ : bool = False ) -> List[int]:
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=UpperCAmelCase__ , token_ids_a=UpperCAmelCase__ , already_has_special_tokens=UpperCAmelCase__ )
if token_ids_a is not None:
return ([0] * len(UpperCAmelCase__ )) + [1] + ([0] * len(UpperCAmelCase__ )) + [1, 1]
return ([0] * len(UpperCAmelCase__ )) + [1, 1]
def _lowercase ( self : str , UpperCAmelCase__ : List[int] , UpperCAmelCase__ : Optional[List[int]] = None ) -> List[int]:
_a : Any = [self.sep_token_id]
_a : Optional[Any] = [2]
if token_ids_a is None:
return len(token_ids_a + sep ) * [0] + cls_segment_id
return len(token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1] + cls_segment_id
def _lowercase ( self : Optional[int] , UpperCAmelCase__ : str , UpperCAmelCase__ : Optional[str] = None ) -> Tuple[str]:
if not os.path.isdir(UpperCAmelCase__ ):
logger.error(f"""Vocabulary path ({save_directory}) should be a directory""" )
return
_a : Union[str, Any] = os.path.join(
UpperCAmelCase__ , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(UpperCAmelCase__ ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , UpperCAmelCase__ )
elif not os.path.isfile(self.vocab_file ):
with open(UpperCAmelCase__ , """wb""" ) as fi:
_a : Optional[Any] = self.sp_model.serialized_model_proto()
fi.write(UpperCAmelCase__ )
return (out_vocab_file,)
def _lowercase ( self : Any , *UpperCAmelCase__ : List[str] , **UpperCAmelCase__ : List[str] ) -> List[str]:
_a : Tuple = super()._decode(*UpperCAmelCase__ , **UpperCAmelCase__ )
_a : Optional[Any] = text.replace(""" """ , """""" ).replace("""\u2582""" , """ """ ).replace("""\u2583""" , """\n""" )
return text
| 294 | 0 |
import unittest
from transformers import MraConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
if is_torch_available():
import torch
from transformers import (
MraForMaskedLM,
MraForMultipleChoice,
MraForQuestionAnswering,
MraForSequenceClassification,
MraForTokenClassification,
MraModel,
)
from transformers.models.mra.modeling_mra import MRA_PRETRAINED_MODEL_ARCHIVE_LIST
class __magic_name__ :
def __init__( self , _a , _a=2 , _a=8 , _a=True , _a=True , _a=True , _a=True , _a=99 , _a=16 , _a=5 , _a=2 , _a=36 , _a="gelu" , _a=0.0 , _a=0.0 , _a=512 , _a=16 , _a=2 , _a=0.0_2 , _a=3 , _a=4 , _a=None , ) -> List[str]:
lowerCAmelCase_ = parent
lowerCAmelCase_ = batch_size
lowerCAmelCase_ = seq_length
lowerCAmelCase_ = is_training
lowerCAmelCase_ = use_input_mask
lowerCAmelCase_ = use_token_type_ids
lowerCAmelCase_ = use_labels
lowerCAmelCase_ = vocab_size
lowerCAmelCase_ = hidden_size
lowerCAmelCase_ = num_hidden_layers
lowerCAmelCase_ = num_attention_heads
lowerCAmelCase_ = intermediate_size
lowerCAmelCase_ = hidden_act
lowerCAmelCase_ = hidden_dropout_prob
lowerCAmelCase_ = attention_probs_dropout_prob
lowerCAmelCase_ = max_position_embeddings
lowerCAmelCase_ = type_vocab_size
lowerCAmelCase_ = type_sequence_label_size
lowerCAmelCase_ = initializer_range
lowerCAmelCase_ = num_labels
lowerCAmelCase_ = num_choices
lowerCAmelCase_ = scope
def __a ( self ) -> int:
lowerCAmelCase_ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
lowerCAmelCase_ = None
if self.use_input_mask:
lowerCAmelCase_ = random_attention_mask([self.batch_size, self.seq_length] )
lowerCAmelCase_ = None
if self.use_token_type_ids:
lowerCAmelCase_ = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
lowerCAmelCase_ = None
lowerCAmelCase_ = None
lowerCAmelCase_ = None
if self.use_labels:
lowerCAmelCase_ = ids_tensor([self.batch_size] , self.type_sequence_label_size )
lowerCAmelCase_ = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
lowerCAmelCase_ = ids_tensor([self.batch_size] , self.num_choices )
lowerCAmelCase_ = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def __a ( self ) -> List[str]:
return MraConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=__UpperCAmelCase , initializer_range=self.initializer_range , )
def __a ( self ) -> List[Any]:
lowerCAmelCase_ = self.get_config()
lowerCAmelCase_ = 300
return config
def __a ( self ) -> Dict:
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = self.prepare_config_and_inputs()
        config.is_decoder = True
        encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size] )
        encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 )
return (
config,
input_ids,
token_type_ids,
input_mask,
sequence_labels,
token_labels,
choice_labels,
encoder_hidden_states,
encoder_attention_mask,
)
def __a ( self , _a , _a , _a , _a , _a , _a , _a ) -> Optional[int]:
lowerCAmelCase_ = MraModel(config=__UpperCAmelCase )
model.to(__UpperCAmelCase )
model.eval()
lowerCAmelCase_ = model(__UpperCAmelCase , attention_mask=__UpperCAmelCase , token_type_ids=__UpperCAmelCase )
lowerCAmelCase_ = model(__UpperCAmelCase , token_type_ids=__UpperCAmelCase )
lowerCAmelCase_ = model(__UpperCAmelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def __a ( self , _a , _a , _a , _a , _a , _a , _a , _a , _a , ) -> str:
lowerCAmelCase_ = True
lowerCAmelCase_ = MraModel(__UpperCAmelCase )
model.to(__UpperCAmelCase )
model.eval()
lowerCAmelCase_ = model(
__UpperCAmelCase , attention_mask=__UpperCAmelCase , token_type_ids=__UpperCAmelCase , encoder_hidden_states=__UpperCAmelCase , encoder_attention_mask=__UpperCAmelCase , )
lowerCAmelCase_ = model(
__UpperCAmelCase , attention_mask=__UpperCAmelCase , token_type_ids=__UpperCAmelCase , encoder_hidden_states=__UpperCAmelCase , )
lowerCAmelCase_ = model(__UpperCAmelCase , attention_mask=__UpperCAmelCase , token_type_ids=__UpperCAmelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def __a ( self , _a , _a , _a , _a , _a , _a , _a ) -> List[Any]:
lowerCAmelCase_ = MraForMaskedLM(config=__UpperCAmelCase )
model.to(__UpperCAmelCase )
model.eval()
lowerCAmelCase_ = model(__UpperCAmelCase , attention_mask=__UpperCAmelCase , token_type_ids=__UpperCAmelCase , labels=__UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def __a ( self , _a , _a , _a , _a , _a , _a , _a ) -> int:
lowerCAmelCase_ = MraForQuestionAnswering(config=__UpperCAmelCase )
model.to(__UpperCAmelCase )
model.eval()
lowerCAmelCase_ = model(
__UpperCAmelCase , attention_mask=__UpperCAmelCase , token_type_ids=__UpperCAmelCase , start_positions=__UpperCAmelCase , end_positions=__UpperCAmelCase , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def __a ( self , _a , _a , _a , _a , _a , _a , _a ) -> str:
lowerCAmelCase_ = self.num_labels
lowerCAmelCase_ = MraForSequenceClassification(__UpperCAmelCase )
model.to(__UpperCAmelCase )
model.eval()
lowerCAmelCase_ = model(__UpperCAmelCase , attention_mask=__UpperCAmelCase , token_type_ids=__UpperCAmelCase , labels=__UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def __a ( self , _a , _a , _a , _a , _a , _a , _a ) -> List[str]:
lowerCAmelCase_ = self.num_labels
lowerCAmelCase_ = MraForTokenClassification(config=__UpperCAmelCase )
model.to(__UpperCAmelCase )
model.eval()
lowerCAmelCase_ = model(__UpperCAmelCase , attention_mask=__UpperCAmelCase , token_type_ids=__UpperCAmelCase , labels=__UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def __a ( self , _a , _a , _a , _a , _a , _a , _a ) -> List[str]:
lowerCAmelCase_ = self.num_choices
lowerCAmelCase_ = MraForMultipleChoice(config=__UpperCAmelCase )
model.to(__UpperCAmelCase )
model.eval()
lowerCAmelCase_ = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
lowerCAmelCase_ = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
lowerCAmelCase_ = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
lowerCAmelCase_ = model(
__UpperCAmelCase , attention_mask=__UpperCAmelCase , token_type_ids=__UpperCAmelCase , labels=__UpperCAmelCase , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def __a ( self ) -> Optional[int]:
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"""input_ids""": input_ids, """token_type_ids""": token_type_ids, """attention_mask""": input_mask}
return config, inputs_dict
@require_torch
class __magic_name__ (__SCREAMING_SNAKE_CASE , unittest.TestCase ):
lowerCamelCase__ = (
(
MraModel,
MraForMaskedLM,
MraForMultipleChoice,
MraForQuestionAnswering,
MraForSequenceClassification,
MraForTokenClassification,
)
if is_torch_available()
else ()
)
lowerCamelCase__ = False
lowerCamelCase__ = False
lowerCamelCase__ = False
lowerCamelCase__ = False
lowerCamelCase__ = ()
def __a ( self ) -> Optional[Any]:
lowerCAmelCase_ = MraModelTester(self )
lowerCAmelCase_ = ConfigTester(self , config_class=__UpperCAmelCase , hidden_size=37 )
def __a ( self ) -> int:
self.config_tester.run_common_tests()
def __a ( self ) -> List[str]:
lowerCAmelCase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__UpperCAmelCase )
def __a ( self ) -> int:
lowerCAmelCase_ = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
lowerCAmelCase_ = type
self.model_tester.create_and_check_model(*__UpperCAmelCase )
def __a ( self ) -> str:
lowerCAmelCase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*__UpperCAmelCase )
def __a ( self ) -> Union[str, Any]:
lowerCAmelCase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_multiple_choice(*__UpperCAmelCase )
def __a ( self ) -> List[Any]:
lowerCAmelCase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*__UpperCAmelCase )
def __a ( self ) -> Union[str, Any]:
lowerCAmelCase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*__UpperCAmelCase )
def __a ( self ) -> Any:
lowerCAmelCase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*__UpperCAmelCase )
@slow
def __a ( self ) -> Any:
for model_name in MRA_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowerCAmelCase_ = MraModel.from_pretrained(__UpperCAmelCase )
self.assertIsNotNone(__UpperCAmelCase )
@unittest.skip(reason="MRA does not output attentions" )
def __a ( self ) -> List[Any]:
return
@require_torch
class __magic_name__ (unittest.TestCase ):
@slow
def __a ( self ) -> Optional[int]:
lowerCAmelCase_ = MraModel.from_pretrained("uw-madison/mra-base-512-4" )
lowerCAmelCase_ = torch.arange(256 ).unsqueeze(0 )
with torch.no_grad():
lowerCAmelCase_ = model(__UpperCAmelCase )[0]
lowerCAmelCase_ = torch.Size((1, 256, 768) )
self.assertEqual(output.shape , __UpperCAmelCase )
lowerCAmelCase_ = torch.tensor(
[[[-0.0_1_4_0, 0.0_8_3_0, -0.0_3_8_1], [0.1_5_4_6, 0.1_4_0_2, 0.0_2_2_0], [0.1_1_6_2, 0.0_8_5_1, 0.0_1_6_5]]] )
self.assertTrue(torch.allclose(output[:, :3, :3] , __UpperCAmelCase , atol=1E-4 ) )
@slow
def __a ( self ) -> Dict:
lowerCAmelCase_ = MraForMaskedLM.from_pretrained("uw-madison/mra-base-512-4" )
lowerCAmelCase_ = torch.arange(256 ).unsqueeze(0 )
with torch.no_grad():
lowerCAmelCase_ = model(__UpperCAmelCase )[0]
lowerCAmelCase_ = 50265
lowerCAmelCase_ = torch.Size((1, 256, vocab_size) )
self.assertEqual(output.shape , __UpperCAmelCase )
lowerCAmelCase_ = torch.tensor(
[[[9.2_5_9_5, -3.6_0_3_8, 1_1.8_8_1_9], [9.3_8_6_9, -3.2_6_9_3, 1_1.0_9_5_6], [1_1.8_5_2_4, -3.4_9_3_8, 1_3.1_2_1_0]]] )
self.assertTrue(torch.allclose(output[:, :3, :3] , __UpperCAmelCase , atol=1E-4 ) )
@slow
def __a ( self ) -> Optional[Any]:
lowerCAmelCase_ = MraForMaskedLM.from_pretrained("uw-madison/mra-base-4096-8-d3" )
lowerCAmelCase_ = torch.arange(4096 ).unsqueeze(0 )
with torch.no_grad():
lowerCAmelCase_ = model(__UpperCAmelCase )[0]
lowerCAmelCase_ = 50265
lowerCAmelCase_ = torch.Size((1, 4096, vocab_size) )
self.assertEqual(output.shape , __UpperCAmelCase )
lowerCAmelCase_ = torch.tensor(
[[[5.4_7_8_9, -2.3_5_6_4, 7.5_0_6_4], [7.9_0_6_7, -1.3_3_6_9, 9.9_6_6_8], [9.0_7_1_2, -1.8_1_0_6, 7.0_3_8_0]]] )
self.assertTrue(torch.allclose(output[:, :3, :3] , __UpperCAmelCase , atol=1E-4 ) )
| 366 |
import sys
from .dependency_versions_table import deps
from .utils.versions import require_version, require_version_core
# define which module versions we always want to check at run time
# (usually the ones defined in `install_requires` in setup.py)
#
# order specific notes:
# - tqdm must be checked before tokenizers
lowerCamelCase__ = '''python tqdm regex requests packaging filelock numpy tokenizers'''.split()
if sys.version_info < (3, 7):
pkgs_to_check_at_runtime.append('''dataclasses''')
if sys.version_info < (3, 8):
pkgs_to_check_at_runtime.append('''importlib_metadata''')
for pkg in pkgs_to_check_at_runtime:
if pkg in deps:
if pkg == "tokenizers":
# must be loaded here, or else tqdm check may fail
from .utils import is_tokenizers_available
if not is_tokenizers_available():
continue # not required, check version only if installed
require_version_core(deps[pkg])
else:
raise ValueError(F'''can\'t find {pkg} in {deps.keys()}, check dependency_versions_table.py''')
def dep_version_check(pkg: str, hint: str = None):
    require_version(deps[pkg] , hint )
| 22 | 0 |
'''simple docstring'''
class EditDistance :
    def __init__( self ):
        '''simple docstring'''
        self.worda = ''''''
        self.wordb = ''''''
        self.dp = []
    def __min_dist_top_down_dp( self , m : int , n : int ):
        '''simple docstring'''
        if m == -1:
            return n + 1
        elif n == -1:
            return m + 1
        elif self.dp[m][n] > -1:
            return self.dp[m][n]
        else:
            if self.worda[m] == self.wordb[n]:
                self.dp[m][n] = self.__min_dist_top_down_dp(m - 1 , n - 1 )
            else:
                insert_cost = self.__min_dist_top_down_dp(m , n - 1 )
                delete_cost = self.__min_dist_top_down_dp(m - 1 , n )
                replace_cost = self.__min_dist_top_down_dp(m - 1 , n - 1 )
                self.dp[m][n] = 1 + min(insert_cost , delete_cost , replace_cost )
            return self.dp[m][n]
    def min_dist_top_down( self , worda : str , wordb : str ):
        '''simple docstring'''
        self.worda = worda
        self.wordb = wordb
        self.dp = [[-1 for _ in range(len(wordb ) )] for _ in range(len(worda ) )]
        return self.__min_dist_top_down_dp(len(worda ) - 1 , len(wordb ) - 1 )
    def min_dist_bottom_up( self , worda : str , wordb : str ):
        '''simple docstring'''
        self.worda = worda
        self.wordb = wordb
        m = len(worda )
        n = len(wordb )
        self.dp = [[0 for _ in range(n + 1 )] for _ in range(m + 1 )]
        for i in range(m + 1 ):
            for j in range(n + 1 ):
                if i == 0: # first string is empty
                    self.dp[i][j] = j
                elif j == 0: # second string is empty
                    self.dp[i][j] = i
                elif worda[i - 1] == wordb[j - 1]: # last characters are equal
                    self.dp[i][j] = self.dp[i - 1][j - 1]
                else:
                    insert_cost = self.dp[i][j - 1]
                    delete_cost = self.dp[i - 1][j]
                    replace_cost = self.dp[i - 1][j - 1]
                    self.dp[i][j] = 1 + min(insert_cost , delete_cost , replace_cost )
        return self.dp[m][n]
if __name__ == "__main__":
    solver = EditDistance()
print('****************** Testing Edit Distance DP Algorithm ******************')
print()
A =input('Enter the first string: ').strip()
A =input('Enter the second string: ').strip()
print()
print(f"""The minimum edit distance is: {solver.min_dist_top_down(Sa, Sa)}""")
print(f"""The minimum edit distance is: {solver.min_dist_bottom_up(Sa, Sa)}""")
print()
print('*************** End of Testing Edit Distance DP Algorithm ***************')
| 34 |
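# --- Added non-interactive check: "kitten" -> "sitting" needs 3 edits
# (substitute k->s, substitute e->i, append g); both DP variants agree.
checker = EditDistance()
print(checker.min_dist_top_down("kitten", "sitting"))   # 3
print(checker.min_dist_bottom_up("kitten", "sitting"))  # 3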
"""simple docstring"""
def ugly_numbers( n ):
    ugly_nums = [1]
    ia , ib , ic = 0, 0, 0
    next_a = ugly_nums[ia] * 2
    next_b = ugly_nums[ib] * 3
    next_c = ugly_nums[ic] * 5
    for _ in range(1 , n ):
        next_num = min(next_a , next_b , next_c )
        ugly_nums.append(next_num )
        if next_num == next_a:
            ia += 1
            next_a = ugly_nums[ia] * 2
        if next_num == next_b:
            ib += 1
            next_b = ugly_nums[ib] * 3
        if next_num == next_c:
            ic += 1
            next_c = ugly_nums[ic] * 5
    return ugly_nums[-1]
if __name__ == "__main__":
from doctest import testmod
testmod(verbose=True)
print(F"{ugly_numbers(2_0_0) = }")
| 249 | 0 |
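# --- Added worked example: the ugly-number sequence starts
# 1, 2, 3, 4, 5, 6, 8, 9, 10, 12, 15, 16, 18, 20, 24, ...
print(ugly_numbers(10))  # 12
print(ugly_numbers(15))  # 24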
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : str = 6_5_5_2_1
def adler32( plain_text : str ):
    a = 1
    b = 0
    for plain_chr in plain_text:
        a = (a + ord(plain_chr )) % MOD_ADLER
        b = (b + a) % MOD_ADLER
    return (b << 16) | a
| 69 |
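# --- Added cross-check against the stdlib, which implements the same checksum;
# 0x11e60398 is the well-known Adler-32 value for "Wikipedia".
import zlib
print(hex(adler32("Wikipedia")))        # 0x11e60398
print(hex(zlib.adler32(b"Wikipedia")))  # 0x11e60398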
"""simple docstring"""
from __future__ import annotations
import unittest
from transformers import BlenderbotConfig, BlenderbotTokenizer, is_tf_available
from transformers.testing_utils import require_tf, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFAutoModelForSeqaSeqLM, TFBlenderbotForConditionalGeneration, TFBlenderbotModel
@require_tf
class a :
"""simple docstring"""
UpperCAmelCase = BlenderbotConfig
UpperCAmelCase = {}
UpperCAmelCase = "gelu"
def __init__( self: Optional[Any] , UpperCamelCase: str , UpperCamelCase: str=13 , UpperCamelCase: Union[str, Any]=7 , UpperCamelCase: int=True , UpperCamelCase: List[Any]=False , UpperCamelCase: Optional[int]=99 , UpperCamelCase: Optional[int]=32 , UpperCamelCase: Optional[int]=2 , UpperCamelCase: Tuple=4 , UpperCamelCase: List[Any]=37 , UpperCamelCase: int=0.1 , UpperCamelCase: Any=0.1 , UpperCamelCase: Tuple=20 , UpperCamelCase: List[str]=2 , UpperCamelCase: Dict=1 , UpperCamelCase: Optional[int]=0 , ):
"""simple docstring"""
A__ = parent
A__ = batch_size
A__ = seq_length
A__ = is_training
A__ = use_labels
A__ = vocab_size
A__ = hidden_size
A__ = num_hidden_layers
A__ = num_attention_heads
A__ = intermediate_size
A__ = hidden_dropout_prob
A__ = attention_probs_dropout_prob
A__ = max_position_embeddings
A__ = eos_token_id
A__ = pad_token_id
A__ = bos_token_id
    def prepare_config_and_inputs_for_common(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length - 1], self.vocab_size)
        eos_tensor = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size), 1)
        input_ids = tf.concat([input_ids, eos_tensor], axis=1)
        decoder_input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        config = self.config_cls(
            vocab_size=self.vocab_size, d_model=self.hidden_size, encoder_layers=self.num_hidden_layers, decoder_layers=self.num_hidden_layers, encoder_attention_heads=self.num_attention_heads, decoder_attention_heads=self.num_attention_heads, encoder_ffn_dim=self.intermediate_size, decoder_ffn_dim=self.intermediate_size, dropout=self.hidden_dropout_prob, attention_dropout=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, eos_token_ids=[2], bos_token_id=self.bos_token_id, pad_token_id=self.pad_token_id, decoder_start_token_id=self.pad_token_id, **self.config_updates,
        )
        inputs_dict = prepare_blenderbot_inputs_dict(config, input_ids, decoder_input_ids)
        return config, inputs_dict
    def check_decoder_model_past_large_inputs(self, config, inputs_dict):
        model = TFBlenderbotModel(config=config).get_decoder()
        input_ids = inputs_dict["input_ids"]

        input_ids = input_ids[:1, :]
        attention_mask = inputs_dict["attention_mask"][:1, :]
        head_mask = inputs_dict["head_mask"]
        self.batch_size = 1

        # first forward pass
        outputs = model(input_ids, attention_mask=attention_mask, head_mask=head_mask, use_cache=True)

        output, past_key_values = outputs.to_tuple()

        # create hypothetical next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
        next_attn_mask = tf.cast(ids_tensor((self.batch_size, 3), 2), tf.int8)

        # append to next input_ids and
        next_input_ids = tf.concat([input_ids, next_tokens], axis=-1)
        next_attention_mask = tf.concat([attention_mask, next_attn_mask], axis=-1)

        output_from_no_past = model(next_input_ids, attention_mask=next_attention_mask)[0]
        output_from_past = model(next_tokens, attention_mask=next_attention_mask, past_key_values=past_key_values)[0]

        self.parent.assertEqual(next_tokens.shape[1], output_from_past.shape[1])

        # select random slice
        random_slice_idx = int(ids_tensor((1,), output_from_past.shape[-1]))
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx]
        output_from_past_slice = output_from_past[:, :, random_slice_idx]

        # test that outputs are equal for slice
        tf.debugging.assert_near(output_from_past_slice, output_from_no_past_slice, rtol=1e-3)
def prepare_blenderbot_inputs_dict(config, input_ids, decoder_input_ids, attention_mask=None, decoder_attention_mask=None, head_mask=None, decoder_head_mask=None, cross_attn_head_mask=None):
    if attention_mask is None:
        attention_mask = tf.cast(tf.math.not_equal(input_ids, config.pad_token_id), tf.int8)
    if decoder_attention_mask is None:
        decoder_attention_mask = tf.concat(
            [
                tf.ones(decoder_input_ids[:, :1].shape, dtype=tf.int8),
                tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:], config.pad_token_id), tf.int8),
            ],
            axis=-1,
        )
    if head_mask is None:
        head_mask = tf.ones((config.encoder_layers, config.encoder_attention_heads))
    if decoder_head_mask is None:
        decoder_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads))
    if cross_attn_head_mask is None:
        cross_attn_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads))
return {
"input_ids": input_ids,
"decoder_input_ids": decoder_input_ids,
"attention_mask": attention_mask,
"decoder_attention_mask": decoder_attention_mask,
"head_mask": head_mask,
"decoder_head_mask": decoder_head_mask,
"cross_attn_head_mask": cross_attn_head_mask,
}
@require_tf
class TFBlenderbotModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (TFBlenderbotForConditionalGeneration, TFBlenderbotModel) if is_tf_available() else ()
    all_generative_model_classes = (TFBlenderbotForConditionalGeneration,) if is_tf_available() else ()
    pipeline_model_mapping = (
        {
            "conversational": TFBlenderbotForConditionalGeneration,
            "feature-extraction": TFBlenderbotModel,
            "summarization": TFBlenderbotForConditionalGeneration,
            "text2text-generation": TFBlenderbotForConditionalGeneration,
            "translation": TFBlenderbotForConditionalGeneration,
        }
        if is_tf_available()
        else {}
    )
    is_encoder_decoder = True
    test_pruning = False
    test_onnx = False
    def setUp(self):
        self.model_tester = TFBlenderbotModelTester(self)
        self.config_tester = ConfigTester(self, config_class=BlenderbotConfig)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_decoder_model_past_large_inputs(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.check_decoder_model_past_large_inputs(*config_and_inputs)
@require_tokenizers
@require_tf
class TFBlenderbot400MIntegrationTests(unittest.TestCase):
    src_text = ["My friends are cool but they eat too many carbs."]
    model_name = "facebook/blenderbot-400M-distill"
    @cached_property
    def tokenizer(self):
        return BlenderbotTokenizer.from_pretrained(self.model_name)

    @cached_property
    def model(self):
        model = TFAutoModelForSeq2SeqLM.from_pretrained(self.model_name)
        return model
    @slow
    def test_generation_from_long_input(self):
        model_inputs = self.tokenizer(self.src_text, return_tensors="tf")
        generated_ids = self.model.generate(
            model_inputs.input_ids,
        )
        generated_words = self.tokenizer.batch_decode(generated_ids.numpy(), skip_special_tokens=True)[0]
assert (
generated_words
== " That's unfortunate. Are they trying to lose weight or are they just trying to be healthier?"
)
| 69 | 1 |
"""simple docstring"""
from __future__ import annotations
import math
import numpy as np
from numpy.linalg import norm
def euclidean(input_a: np.ndarray, input_b: np.ndarray) -> float:
    return math.sqrt(sum(pow(a - b, 2) for a, b in zip(input_a, input_b)))


def similarity_search(dataset: np.ndarray, value_array: np.ndarray) -> list[list[list[float] | float]]:
    if dataset.ndim != value_array.ndim:
        msg = (
            "Wrong input data's dimensions... "
            f"dataset : {dataset.ndim}, value_array : {value_array.ndim}"
        )
        raise ValueError(msg)

    try:
        if dataset.shape[1] != value_array.shape[1]:
            msg = (
                "Wrong input data's shape... "
                f"dataset : {dataset.shape[1]}, value_array : {value_array.shape[1]}"
            )
            raise ValueError(msg)
    except IndexError:
        if dataset.ndim != value_array.ndim:
            raise TypeError("Wrong shape")

    if dataset.dtype != value_array.dtype:
        msg = (
            "Input data have different datatype... "
            f"dataset : {dataset.dtype}, value_array : {value_array.dtype}"
        )
        raise TypeError(msg)

    answer = []

    for value in value_array:
        dist = euclidean(value, dataset[0])
        vector = dataset[0].tolist()

        for dataset_value in dataset[1:]:
            temp_dist = euclidean(value, dataset_value)

            if dist > temp_dist:
                dist = temp_dist
                vector = dataset_value.tolist()

        answer.append([vector, dist])

    return answer


def cosine_similarity(input_a: np.ndarray, input_b: np.ndarray) -> float:
    return np.dot(input_a, input_b) / (norm(input_a) * norm(input_b))
if __name__ == "__main__":
import doctest
doctest.testmod()
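Illustrative usage of the helpers above on a tiny dataset (values follow directly from the definitions):

import numpy as np

dataset = np.array([[0.0, 0.0], [1.0, 1.0], [2.0, 2.0]])
value_array = np.array([[0.0, 0.1]])
print(similarity_search(dataset, value_array))  # -> [[[0.0, 0.0], 0.1]] (approximately)
print(cosine_similarity(np.array([1.0, 2.0]), np.array([6.0, 32.0])))  # ~0.9615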
| 291 |
"""simple docstring"""
from math import ceil
def assert_device_map(device_map, num_blocks):
    blocks = list(range(0, num_blocks))

    device_map_blocks = [item for sublist in list(device_map.values()) for item in sublist]

    # Duplicate check
    duplicate_blocks = []
    for i in device_map_blocks:
        if device_map_blocks.count(i) > 1 and i not in duplicate_blocks:
            duplicate_blocks.append(i)
    # Missing blocks
    missing_blocks = [i for i in blocks if i not in device_map_blocks]
    extra_blocks = [i for i in device_map_blocks if i not in blocks]

    if len(duplicate_blocks) != 0:
        raise ValueError(
            "Duplicate attention blocks specified in device_map. Attention blocks must be specified to one device."
            " These attention blocks were specified more than once: " + str(duplicate_blocks)
        )
    if len(missing_blocks) != 0:
        raise ValueError(
            "There are attention blocks for this model that are not specified in the device_map. Add these attention "
            "blocks to a device on the device_map: " + str(missing_blocks)
        )
    if len(extra_blocks) != 0:
        raise ValueError(
            "The device_map contains more attention blocks than this model has. Remove these from the device_map:"
            + str(extra_blocks)
        )


def get_device_map(n_layers, devices):
    """Returns a dictionary of layers distributed evenly across all devices."""
    layers = list(range(n_layers))
    n_blocks = int(ceil(n_layers / len(devices)))
    layers_list = [layers[i : i + n_blocks] for i in range(0, n_layers, n_blocks)]

    return dict(zip(devices, layers_list))
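Illustrative: splitting 8 layers across 3 devices yields blocks of ceil(8/3) = 3 layers, with the last device taking the remainder:

assert get_device_map(8, [0, 1, 2]) == {0: [0, 1, 2], 1: [3, 4, 5], 2: [6, 7]}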
| 291 | 1 |
from collections import Counter
from timeit import timeit
def can_string_be_rearranged_as_palindrome_counter(input_str: str = "") -> bool:
    """A string can be rearranged into a palindrome iff at most one character has an odd count."""
    return sum(c % 2 for c in Counter(input_str.replace(" ", "").lower()).values()) < 2


def can_string_be_rearranged_as_palindrome(input_str: str = "") -> bool:
    if len(input_str) == 0:
        return True
    lower_case_input_str = input_str.replace(" ", "").lower()
    # character_freq_dict: Stores the frequency of every character in the input string
    character_freq_dict: dict[str, int] = {}

    for character in lower_case_input_str:
        character_freq_dict[character] = character_freq_dict.get(character, 0) + 1

    odd_char = 0
    for character_count in character_freq_dict.values():
        if character_count % 2:
            odd_char += 1

    if odd_char > 1:
        return False
    return True


def benchmark(check_str: str = "") -> None:
    print("\nFor string = ", check_str, ":")
    print(
        "> can_string_be_rearranged_as_palindrome_counter()",
        "\tans =",
        can_string_be_rearranged_as_palindrome_counter(check_str),
        "\ttime =",
        timeit("z.can_string_be_rearranged_as_palindrome_counter(z.check_str)", setup="import __main__ as z"),
        "seconds",
    )
    print(
        "> can_string_be_rearranged_as_palindrome()",
        "\tans =",
        can_string_be_rearranged_as_palindrome(check_str),
        "\ttime =",
        timeit("z.can_string_be_rearranged_as_palindrome(z.check_str)", setup="import __main__ as z"),
        "seconds",
    )


if __name__ == "__main__":
    check_str = input(
        "Enter string to determine if it can be rearranged as a palindrome or not: "
    ).strip()
    benchmark(check_str)
    status = can_string_be_rearranged_as_palindrome_counter(check_str)
    print(f"{check_str} can {'' if status else 'not '}be rearranged as a palindrome")
| 319 |
import argparse
import os
import torch
from transformers.utils import WEIGHTS_NAME
snake_case = ["""small""", """medium""", """large"""]
snake_case = """lm_head.decoder.weight"""
snake_case = """lm_head.weight"""
def lowerCamelCase__ ( lowercase , lowercase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : Tuple = torch.load(lowercase )
SCREAMING_SNAKE_CASE : Any = d.pop(lowercase )
os.makedirs(lowercase , exist_ok=lowercase )
torch.save(lowercase , os.path.join(lowercase , lowercase ) )
if __name__ == "__main__":
snake_case = argparse.ArgumentParser()
parser.add_argument("""--dialogpt_path""", default=""".""", type=str)
snake_case = parser.parse_args()
for MODEL in DIALOGPT_MODELS:
snake_case = os.path.join(args.dialogpt_path, F"""{MODEL}_ft.pkl""")
snake_case = F"""./DialoGPT-{MODEL}"""
convert_dialogpt_checkpoint(
checkpoint_path,
pytorch_dump_folder_path,
)
| 319 | 1 |
import argparse
import json
import os
import torch
from torch import nn
from transformers import NllbMoeConfig, NllbMoeModel
from transformers.modeling_utils import dtype_byte_size
from transformers.utils import WEIGHTS_INDEX_NAME, WEIGHTS_NAME
def remove_ignore_keys_(state_dict):
    ignore_keys = [
        "encoder.version",
        "decoder.version",
        "model.encoder.version",
        "model.decoder.version",
        "decoder.output_projection.weight",
        "_float_tensor",
        "encoder.embed_positions._float_tensor",
        "decoder.embed_positions._float_tensor",
    ]
    for k in ignore_keys:
        state_dict.pop(k, None)
def make_linear_from_emb(emb):
    vocab_size, emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size, emb_size, bias=False)
    lin_layer.weight.data = emb.weight.data
    return lin_layer
def rename_fairseq_keys(state_dict, expert_idx=None):
    new_dict = {}
    for old_key in state_dict.keys():
        key = old_key
        if "moe_layer.experts." in key:
            if expert_idx is not None:
                key = key.replace("moe_layer.experts.0", f"ffn.experts.expert_{expert_idx}")
            else:
                key = key.replace("moe_layer.experts.", "ffn.experts.expert_")
        if "gate" in key:
            key = key.replace(".moe_layer.gate.wg", ".ffn.router.classifier")
        # note: the source wrote `if "fc2" and ...`, whose first operand is always
        # truthy; a membership test on `key` is what was intended
        if "fc2" in key and "experts" not in key:
            key = key.replace(".fc2.", ".ffn.fc2.")
        if "fc1" in key and "experts" not in key:
            key = key.replace(".fc1.", ".ffn.fc1.")
        if ".encoder_attn." in key:
            key = key.replace(".encoder_attn.", ".cross_attention.")
        if "encoder_attn_layer_norm" in key:
            key = key.replace("encoder_attn_layer_norm", "cross_attention_layer_norm")
        if "final_layer_norm" in key:
            key = key.replace("final_layer_norm", "ff_layer_norm")
        new_dict[key] = state_dict[old_key]
    return new_dict
def shard_on_the_fly(switch_checkpoint_path, dump_path, num_experts, dtype, weights_name: str = WEIGHTS_NAME):
    sharded_state_dicts = []
    total_size = 0
    os.makedirs(dump_path, exist_ok=True)

    for expert in range(num_experts):
        expert_path = switch_checkpoint_path + f"-rank-{expert}.pt"
        if os.path.isfile(expert_path):
            expert_state = torch.load(expert_path)["model"]
            remove_ignore_keys_(expert_state)
            expert_state = rename_fairseq_keys(expert_state, expert)
            save_path = os.path.join(
                dump_path, weights_name.replace(".bin", f"-{len(sharded_state_dicts)+1:05d}-of-???.bin")
            )
            torch.save(expert_state, save_path)
            sharded_state_dicts.append(expert_state.keys())
            total_size += sum([value.numel() for key, value in expert_state.items()]) * dtype_byte_size(
                expert_state[list(expert_state)[0]].dtype
            )

    # Add the last block
    save_path = os.path.join(dump_path, weights_name.replace(".bin", f"-{len(sharded_state_dicts)+1:05d}-of-???.bin"))
    shared_weights = torch.load(switch_checkpoint_path + "-shared.pt")["model"]
    remove_ignore_keys_(shared_weights)
    shared_weights = rename_fairseq_keys(shared_weights, None)
    shared_weights["shared.weight"] = shared_weights["decoder.embed_tokens.weight"]
    sharded_state_dicts.append(shared_weights.keys())

    # If we only have the shared weights (dummy model/experts saved on the same file)
    if len(sharded_state_dicts) == 1:
        save_path = os.path.join(dump_path, weights_name)
        torch.save(shared_weights, save_path)
        return {weights_name: sharded_state_dicts[0]}, None
    else:
        torch.save(shared_weights, save_path)
    # Otherwise, let's build the index
    weight_map = {}
    for idx, shard in enumerate(sharded_state_dicts):
        shard_file = weights_name.replace(".bin", f"-{idx+1:05d}-of-{len(sharded_state_dicts):05d}.bin")
        temp_filename = os.path.join(dump_path, weights_name.replace(".bin", f"-{idx+1:05d}-of-???.bin"))
        os.rename(temp_filename, os.path.join(dump_path, shard_file))
        for key in shard:
            weight_map[key] = shard_file

    # Add the metadata
    metadata = {"total_size": total_size}
    index = {"metadata": metadata, "weight_map": weight_map}

    with open(os.path.join(dump_path, WEIGHTS_INDEX_NAME), "w", encoding="utf-8") as f:
        content = json.dumps(index, indent=2, sort_keys=True) + "\n"
        f.write(content)

    return metadata, index
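For orientation, the index written above follows the standard sharded-checkpoint layout; a minimal hypothetical example (file names, keys and sizes invented for illustration):

example_index = {
    "metadata": {"total_size": 123456789},
    "weight_map": {
        "decoder.layers.0.ffn.experts.expert_0.fc1.weight": "pytorch_model-00001-of-00002.bin",
        "shared.weight": "pytorch_model-00002-of-00002.bin",
    },
}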
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--nllb_moe_checkpoint_path",
        default="/home/arthur_huggingface_co/fairseq/weights/checkpoints/model_moe_54b/checkpoint_2_300000",
        type=str,
        required=False,
        help="Path to a directory containing a folder per layer. Follows the original Google format.",
    )
    parser.add_argument("--dtype", default="float32", type=str, required=False, help="dtype of the saved model")
    parser.add_argument(
        "--pytorch_dump_folder_path",
        default="/home/arthur_huggingface_co/fairseq/weights/checkpoints/hf-converted-moe-54b",
        type=str,
        required=False,
        help="Path to the output pytorch model.",
    )
    args = parser.parse_args()
    metadata, index = shard_on_the_fly(
        args.nllb_moe_checkpoint_path,
        args.pytorch_dump_folder_path,
        128,
        args.dtype,
    )

    config = NllbMoeConfig.from_pretrained(
        "facebook/nllb-200-3.3B", encoder_sparse_step=4, decoder_sparse_step=4, num_experts=128
    )
    config.save_pretrained(args.pytorch_dump_folder_path)
    model = NllbMoeModel.from_pretrained(args.pytorch_dump_folder_path)
    print("Done")
    model.save_pretrained(args.pytorch_dump_folder_path)
| 302 |
from dataclasses import dataclass
from typing import List, Optional, Union
import numpy as np
import torch
from ...utils import BaseOutput, OptionalDependencyNotAvailable, is_torch_available, is_transformers_available
@dataclass
class TextToVideoSDPipelineOutput(BaseOutput):
    frames: Union[List[np.ndarray], torch.FloatTensor]
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import * # noqa F403
else:
from .pipeline_text_to_video_synth import TextToVideoSDPipeline
    from .pipeline_text_to_video_synth_img2img import VideoToVideoSDPipeline  # noqa: F401
from .pipeline_text_to_video_zero import TextToVideoZeroPipeline
| 302 | 1 |
import logging
from transformers.configuration_utils import PretrainedConfig
logger = logging.getLogger(__name__)


class MaskedBertConfig(PretrainedConfig):
    """A BERT-like configuration with extra pruning_method, mask_init and mask_scale attributes."""

    model_type = "masked_bert"

    def __init__(
        self, vocab_size=30522, hidden_size=768, num_hidden_layers=12, num_attention_heads=12,
        intermediate_size=3072, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1,
        max_position_embeddings=512, type_vocab_size=2, initializer_range=0.02, layer_norm_eps=1e-12,
        pad_token_id=0, pruning_method="topK", mask_init="constant", mask_scale=0.0, **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.pruning_method = pruning_method
        self.mask_init = mask_init
        self.mask_scale = mask_scale
| 276 |
def solution(numerator: int = 3, denominator: int = 7, limit: int = 1000000) -> int:
    """Numerator of the largest fraction left of numerator/denominator with denominator <= limit."""
    max_numerator = 0
    max_denominator = 1
    for current_denominator in range(1, limit + 1):
        current_numerator = current_denominator * numerator // denominator
        if current_denominator % denominator == 0:
            current_numerator -= 1
        if current_numerator * max_denominator > current_denominator * max_numerator:
            max_numerator = current_numerator
            max_denominator = current_denominator
    return max_numerator
if __name__ == "__main__":
print(solution(numerator=3, denominator=7, limit=1_0_0_0_0_0_0))
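From the Project Euler statement, with denominators up to 8 the fraction immediately to the left of 3/7 is 2/5, which gives a quick illustrative check:

assert solution(3, 7, 8) == 2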
| 276 | 1 |
"""simple docstring"""
from __future__ import annotations
def two_pointer(nums: list[int], target: int) -> list[int]:
    """Return indices i < j with nums[i] + nums[j] == target, or [] if none.
    Assumes nums is sorted in ascending order."""
    i = 0
    j = len(nums) - 1
    while i < j:
        if nums[i] + nums[j] == target:
            return [i, j]
        elif nums[i] + nums[j] < target:
            i = i + 1
        else:
            j = j - 1
    return []
if __name__ == "__main__":
import doctest
doctest.testmod()
print(f'''{two_pointer([2, 7, 11, 15], 9) = }''')
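On unsorted input the two-pointer scan can miss pairs, so the sortedness assumption matters. Two illustrative checks:

assert two_pointer([2, 7, 11, 15], 9) == [0, 1]
assert two_pointer([2, 7, 11, 15], 100) == []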
| 40 |
import argparse
import os
from transformers.utils import direct_transformers_import
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_task_guides.py
TRANSFORMERS_PATH = "src/transformers"
PATH_TO_TASK_GUIDES = "docs/source/en/tasks"
def _find_text_in_file(filename, start_prompt, end_prompt):
    """Return the text between `start_prompt` and `end_prompt`, trimming empty border lines."""
    with open(filename, "r", encoding="utf-8", newline="\n") as f:
        lines = f.readlines()
    # Find the start prompt.
    start_index = 0
    while not lines[start_index].startswith(start_prompt):
        start_index += 1
    start_index += 1

    end_index = start_index
    while not lines[end_index].startswith(end_prompt):
        end_index += 1
    end_index -= 1

    while len(lines[start_index]) <= 1:
        start_index += 1
    while len(lines[end_index]) <= 1:
        end_index -= 1
    end_index += 1
    return "".join(lines[start_index:end_index]), start_index, end_index, lines
# This is to make sure the transformers module imported is the one in the repo.
transformers_module = direct_transformers_import(TRANSFORMERS_PATH)

TASK_GUIDE_TO_MODELS = {
"""asr.md""": transformers_module.models.auto.modeling_auto.MODEL_FOR_CTC_MAPPING_NAMES,
"""audio_classification.md""": transformers_module.models.auto.modeling_auto.MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES,
"""language_modeling.md""": transformers_module.models.auto.modeling_auto.MODEL_FOR_CAUSAL_LM_MAPPING_NAMES,
"""image_classification.md""": transformers_module.models.auto.modeling_auto.MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES,
"""masked_language_modeling.md""": transformers_module.models.auto.modeling_auto.MODEL_FOR_MASKED_LM_MAPPING_NAMES,
"""multiple_choice.md""": transformers_module.models.auto.modeling_auto.MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES,
"""object_detection.md""": transformers_module.models.auto.modeling_auto.MODEL_FOR_OBJECT_DETECTION_MAPPING_NAMES,
"""question_answering.md""": transformers_module.models.auto.modeling_auto.MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES,
"""semantic_segmentation.md""": transformers_module.models.auto.modeling_auto.MODEL_FOR_SEMANTIC_SEGMENTATION_MAPPING_NAMES,
"""sequence_classification.md""": transformers_module.models.auto.modeling_auto.MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES,
"""summarization.md""": transformers_module.models.auto.modeling_auto.MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES,
"""token_classification.md""": transformers_module.models.auto.modeling_auto.MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES,
"""translation.md""": transformers_module.models.auto.modeling_auto.MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES,
"""video_classification.md""": transformers_module.models.auto.modeling_auto.MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING_NAMES,
"""document_question_answering.md""": transformers_module.models.auto.modeling_auto.MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING_NAMES,
"""monocular_depth_estimation.md""": transformers_module.models.auto.modeling_auto.MODEL_FOR_DEPTH_ESTIMATION_MAPPING_NAMES,
}
# This list contains model types used in some task guides that are not in `CONFIG_MAPPING_NAMES` (therefore not in any
# `MODEL_MAPPING_NAMES` or any `MODEL_FOR_XXX_MAPPING_NAMES`).
SPECIAL_TASK_GUIDE_TO_MODEL_TYPES = {
"""summarization.md""": ("""nllb""",),
"""translation.md""": ("""nllb""",),
}
def get_model_list_for_task(task_guide):
    model_maping_names = TASK_GUIDE_TO_MODELS[task_guide]
    special_model_types = SPECIAL_TASK_GUIDE_TO_MODEL_TYPES.get(task_guide, set())
    model_names = {
        code: name
        for code, name in transformers_module.MODEL_NAMES_MAPPING.items()
        if (code in model_maping_names or code in special_model_types)
    }
    return ", ".join([f"[{name}](../model_doc/{code})" for code, name in model_names.items()]) + "\n"


def check_model_list_for_task(task_guide, overwrite=False):
    current_list, start_index, end_index, lines = _find_text_in_file(
        filename=os.path.join(PATH_TO_TASK_GUIDES, task_guide),
        start_prompt="<!--This tip is automatically generated by `make fix-copies`, do not fill manually!-->",
        end_prompt="<!--End of the generated tip-->",
    )
    new_list = get_model_list_for_task(task_guide)

    if current_list != new_list:
        if overwrite:
            with open(os.path.join(PATH_TO_TASK_GUIDES, task_guide), "w", encoding="utf-8", newline="\n") as f:
                f.writelines(lines[:start_index] + [new_list] + lines[end_index:])
        else:
            raise ValueError(
                f"The list of models that can be used in the {task_guide} guide needs an update. Run `make fix-copies`"
                " to fix this."
            )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument("""--fix_and_overwrite""", action="""store_true""", help="""Whether to fix inconsistencies.""")
    args = parser.parse_args()
for task_guide in TASK_GUIDE_TO_MODELS.keys():
check_model_list_for_task(task_guide, args.fix_and_overwrite)
| 99 | 0 |
'''simple docstring'''
import re
import string
import numpy as np
import datasets
lowerCamelCase : Any = "\nReturns the rate at which the input predicted strings exactly match their references, ignoring any strings input as part of the regexes_to_ignore list.\n"
lowerCamelCase : Any = "\nArgs:\n predictions: List of predicted texts.\n references: List of reference texts.\n regexes_to_ignore: List, defaults to None. Regex expressions of characters to\n ignore when calculating the exact matches. Note: these regexes are removed\n from the input data before the changes based on the options below (e.g. ignore_case,\n ignore_punctuation, ignore_numbers) are applied.\n ignore_case: Boolean, defaults to False. If true, turns everything\n to lowercase so that capitalization differences are ignored.\n ignore_punctuation: Boolean, defaults to False. If true, removes all punctuation before\n comparing predictions and references.\n ignore_numbers: Boolean, defaults to False. If true, removes all punctuation before\n comparing predictions and references.\nReturns:\n exact_match: Dictionary containing exact_match rate. Possible values are between 0.0 and 100.0, inclusive.\nExamples:\n >>> exact_match = datasets.load_metric(\"exact_match\")\n >>> refs = [\"the cat\", \"theater\", \"YELLING\", \"agent007\"]\n >>> preds = [\"cat?\", \"theater\", \"yelling\", \"agent\"]\n >>> results = exact_match.compute(references=refs, predictions=preds)\n >>> print(round(results[\"exact_match\"], 1))\n 25.0\n\n >>> exact_match = datasets.load_metric(\"exact_match\")\n >>> refs = [\"the cat\", \"theater\", \"YELLING\", \"agent007\"]\n >>> preds = [\"cat?\", \"theater\", \"yelling\", \"agent\"]\n >>> results = exact_match.compute(references=refs, predictions=preds, regexes_to_ignore=[\"the \", \"yell\"], ignore_case=True, ignore_punctuation=True)\n >>> print(round(results[\"exact_match\"], 1))\n 50.0\n\n\n >>> exact_match = datasets.load_metric(\"exact_match\")\n >>> refs = [\"the cat\", \"theater\", \"YELLING\", \"agent007\"]\n >>> preds = [\"cat?\", \"theater\", \"yelling\", \"agent\"]\n >>> results = exact_match.compute(references=refs, predictions=preds, regexes_to_ignore=[\"the \", \"yell\", \"YELL\"], ignore_case=True, ignore_punctuation=True)\n >>> print(round(results[\"exact_match\"], 1))\n 75.0\n\n >>> exact_match = datasets.load_metric(\"exact_match\")\n >>> refs = [\"the cat\", \"theater\", \"YELLING\", \"agent007\"]\n >>> preds = [\"cat?\", \"theater\", \"yelling\", \"agent\"]\n >>> results = exact_match.compute(references=refs, predictions=preds, regexes_to_ignore=[\"the \", \"yell\", \"YELL\"], ignore_case=True, ignore_punctuation=True, ignore_numbers=True)\n >>> print(round(results[\"exact_match\"], 1))\n 100.0\n\n >>> exact_match = datasets.load_metric(\"exact_match\")\n >>> refs = [\"The cat sat on the mat.\", \"Theaters are great.\", \"It's like comparing oranges and apples.\"]\n >>> preds = [\"The cat sat on the mat?\", \"Theaters are great.\", \"It's like comparing apples and oranges.\"]\n >>> results = exact_match.compute(references=refs, predictions=preds)\n >>> print(round(results[\"exact_match\"], 1))\n 33.3\n\n"
lowerCamelCase : Optional[Any] = "\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class ExactMatch(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("string", id="sequence"),
                    "references": datasets.Value("string", id="sequence"),
                }
            ),
            reference_urls=[],
        )
    def _compute(self, predictions, references, regexes_to_ignore=None, ignore_case=False, ignore_punctuation=False, ignore_numbers=False):
        if regexes_to_ignore is not None:
            for s in regexes_to_ignore:
                predictions = np.array([re.sub(s, "", x) for x in predictions])
                references = np.array([re.sub(s, "", x) for x in references])
        else:
            predictions = np.asarray(predictions)
            references = np.asarray(references)

        if ignore_case:
            predictions = np.char.lower(predictions)
            references = np.char.lower(references)

        if ignore_punctuation:
            repl_table = string.punctuation.maketrans("", "", string.punctuation)
            predictions = np.char.translate(predictions, table=repl_table)
            references = np.char.translate(references, table=repl_table)

        if ignore_numbers:
            repl_table = string.digits.maketrans("", "", string.digits)
            predictions = np.char.translate(predictions, table=repl_table)
            references = np.char.translate(references, table=repl_table)

        score_list = predictions == references

        return {"exact_match": np.mean(score_list) * 100}
| 114 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {"configuration_fnet": ["FNET_PRETRAINED_CONFIG_ARCHIVE_MAP", "FNetConfig"]}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_fnet"] = ["FNetTokenizer"]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_fnet_fast"] = ["FNetTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_fnet"] = [
"FNET_PRETRAINED_MODEL_ARCHIVE_LIST",
"FNetForMaskedLM",
"FNetForMultipleChoice",
"FNetForNextSentencePrediction",
"FNetForPreTraining",
"FNetForQuestionAnswering",
"FNetForSequenceClassification",
"FNetForTokenClassification",
"FNetLayer",
"FNetModel",
"FNetPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_fnet import FNET_PRETRAINED_CONFIG_ARCHIVE_MAP, FNetConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_fnet import FNetTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_fnet_fast import FNetTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_fnet import (
FNET_PRETRAINED_MODEL_ARCHIVE_LIST,
FNetForMaskedLM,
FNetForMultipleChoice,
FNetForNextSentencePrediction,
FNetForPreTraining,
FNetForQuestionAnswering,
FNetForSequenceClassification,
FNetForTokenClassification,
FNetLayer,
FNetModel,
FNetPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
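The _LazyModule pattern above defers the heavy framework imports until an attribute is first accessed; a minimal sketch of the idea (simplified — the real transformers implementation also handles TYPE_CHECKING, __dir__, and more):

import importlib
import types


class LazyModule(types.ModuleType):
    def __init__(self, name: str, import_structure: dict):
        super().__init__(name)
        # Map each exported attribute to the submodule that defines it.
        self._attr_to_module = {attr: mod for mod, attrs in import_structure.items() for attr in attrs}

    def __getattr__(self, attr: str):
        if attr not in self._attr_to_module:
            raise AttributeError(f"module {self.__name__!r} has no attribute {attr!r}")
        submodule = importlib.import_module(f"{self.__name__}.{self._attr_to_module[attr]}")
        value = getattr(submodule, attr)
        setattr(self, attr, value)  # cache so __getattr__ is only hit once per attribute
        return value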
| 114 | 1 |
import gc
import random
import unittest
import torch
from diffusers import (
    IFImg2ImgPipeline,
    IFImg2ImgSuperResolutionPipeline,
IFInpaintingPipeline,
IFInpaintingSuperResolutionPipeline,
IFPipeline,
IFSuperResolutionPipeline,
)
from diffusers.models.attention_processor import AttnAddedKVProcessor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import floats_tensor, load_numpy, require_torch_gpu, skip_mps, slow, torch_device
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
from . import IFPipelineTesterMixin
@skip_mps
class IFPipelineFastTests(PipelineTesterMixin, IFPipelineTesterMixin, unittest.TestCase):
    pipeline_class = IFPipeline
    params = TEXT_TO_IMAGE_PARAMS - {"width", "height", "latents"}
    batch_params = TEXT_TO_IMAGE_BATCH_PARAMS
    required_optional_params = PipelineTesterMixin.required_optional_params - {"latents"}
    def get_dummy_components(self):
        return self._get_dummy_components()
    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
'''prompt''': '''A painting of a squirrel eating a burger''',
'''generator''': generator,
'''num_inference_steps''': 2,
'''output_type''': '''numpy''',
}
return inputs
    def test_save_load_optional_components(self):
self._test_save_load_optional_components()
@unittest.skipIf(torch_device != '''cuda''' , reason='''float16 requires CUDA''')
    def test_save_load_float16(self):
# Due to non-determinism in save load of the hf-internal-testing/tiny-random-t5 text encoder
super().test_save_load_floataa(expected_max_diff=1e-1)
    def test_attention_slicing_forward_pass(self):
self._test_attention_slicing_forward_pass(expected_max_diff=1e-2)
    def test_save_load_local(self):
self._test_save_load_local()
    def test_inference_batch_single_identical(self):
self._test_inference_batch_single_identical(
expected_max_diff=1e-2 , )
@unittest.skipIf(
torch_device != '''cuda''' or not is_xformers_available() , reason='''XFormers attention is only available with CUDA and `xformers` installed''' , )
    def test_xformers_attention_forwardGenerator_pass(self):
self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1e-3)
@slow
@require_torch_gpu
class IFPipelineSlowTests(unittest.TestCase):
    def tearDown(self):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
    def test_all(self):
        # if
        pipe_1 = IFPipeline.from_pretrained("DeepFloyd/IF-I-XL-v1.0", variant="fp16", torch_dtype=torch.float16)
        pipe_2 = IFSuperResolutionPipeline.from_pretrained(
            "DeepFloyd/IF-II-L-v1.0", variant="fp16", torch_dtype=torch.float16, text_encoder=None, tokenizer=None
        )

        # pre compute text embeddings and remove T5 to save memory
        pipe_1.text_encoder.to("cuda")
        prompt_embeds, negative_prompt_embeds = pipe_1.encode_prompt("anime turtle", device="cuda")

        del pipe_1.tokenizer
        del pipe_1.text_encoder
        gc.collect()

        pipe_1.tokenizer = None
        pipe_1.text_encoder = None

        pipe_1.enable_model_cpu_offload()
        pipe_2.enable_model_cpu_offload()

        pipe_1.unet.set_attn_processor(AttnAddedKVProcessor())
        pipe_2.unet.set_attn_processor(AttnAddedKVProcessor())

        self._test_if(pipe_1, pipe_2, prompt_embeds, negative_prompt_embeds)

        pipe_1.remove_all_hooks()
        pipe_2.remove_all_hooks()

        # img2img
        pipe_1 = IFImg2ImgPipeline(**pipe_1.components)
        pipe_2 = IFImg2ImgSuperResolutionPipeline(**pipe_2.components)

        pipe_1.enable_model_cpu_offload()
        pipe_2.enable_model_cpu_offload()

        pipe_1.unet.set_attn_processor(AttnAddedKVProcessor())
        pipe_2.unet.set_attn_processor(AttnAddedKVProcessor())

        self._test_if_img2img(pipe_1, pipe_2, prompt_embeds, negative_prompt_embeds)

        pipe_1.remove_all_hooks()
        pipe_2.remove_all_hooks()

        # inpainting
        pipe_1 = IFInpaintingPipeline(**pipe_1.components)
        pipe_2 = IFInpaintingSuperResolutionPipeline(**pipe_2.components)

        pipe_1.enable_model_cpu_offload()
        pipe_2.enable_model_cpu_offload()

        pipe_1.unet.set_attn_processor(AttnAddedKVProcessor())
        pipe_2.unet.set_attn_processor(AttnAddedKVProcessor())

        self._test_if_inpainting(pipe_1, pipe_2, prompt_embeds, negative_prompt_embeds)
    def _test_if(self, pipe_1, pipe_2, prompt_embeds, negative_prompt_embeds):
# pipeline 1
_start_torch_memory_measurement()
lowerCAmelCase_ : Optional[Any] = torch.Generator(device='''cpu''').manual_seed(0)
lowerCAmelCase_ : Optional[Any] = pipe_a(
prompt_embeds=A_ , negative_prompt_embeds=A_ , num_inference_steps=2 , generator=A_ , output_type='''np''' , )
lowerCAmelCase_ : Dict = output.images[0]
assert image.shape == (6_4, 6_4, 3)
lowerCAmelCase_ : Any = torch.cuda.max_memory_allocated()
assert mem_bytes < 1_3 * 1_0**9
lowerCAmelCase_ : List[str] = load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if.npy''')
assert_mean_pixel_difference(A_ , A_)
# pipeline 2
_start_torch_memory_measurement()
lowerCAmelCase_ : List[str] = torch.Generator(device='''cpu''').manual_seed(0)
lowerCAmelCase_ : str = floats_tensor((1, 3, 6_4, 6_4) , rng=random.Random(0)).to(A_)
lowerCAmelCase_ : Optional[int] = pipe_a(
prompt_embeds=A_ , negative_prompt_embeds=A_ , image=A_ , generator=A_ , num_inference_steps=2 , output_type='''np''' , )
lowerCAmelCase_ : Union[str, Any] = output.images[0]
assert image.shape == (2_5_6, 2_5_6, 3)
lowerCAmelCase_ : Any = torch.cuda.max_memory_allocated()
assert mem_bytes < 4 * 1_0**9
lowerCAmelCase_ : int = load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_superresolution_stage_II.npy''')
assert_mean_pixel_difference(A_ , A_)
    def _test_if_img2img(self, pipe_1, pipe_2, prompt_embeds, negative_prompt_embeds):
# pipeline 1
_start_torch_memory_measurement()
lowerCAmelCase_ : Any = floats_tensor((1, 3, 6_4, 6_4) , rng=random.Random(0)).to(A_)
lowerCAmelCase_ : Tuple = torch.Generator(device='''cpu''').manual_seed(0)
lowerCAmelCase_ : Any = pipe_a(
prompt_embeds=A_ , negative_prompt_embeds=A_ , image=A_ , num_inference_steps=2 , generator=A_ , output_type='''np''' , )
lowerCAmelCase_ : Union[str, Any] = output.images[0]
assert image.shape == (6_4, 6_4, 3)
lowerCAmelCase_ : str = torch.cuda.max_memory_allocated()
assert mem_bytes < 1_0 * 1_0**9
lowerCAmelCase_ : Optional[int] = load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_img2img.npy''')
assert_mean_pixel_difference(A_ , A_)
# pipeline 2
_start_torch_memory_measurement()
lowerCAmelCase_ : int = torch.Generator(device='''cpu''').manual_seed(0)
lowerCAmelCase_ : List[Any] = floats_tensor((1, 3, 2_5_6, 2_5_6) , rng=random.Random(0)).to(A_)
lowerCAmelCase_ : Any = floats_tensor((1, 3, 6_4, 6_4) , rng=random.Random(0)).to(A_)
lowerCAmelCase_ : List[str] = pipe_a(
prompt_embeds=A_ , negative_prompt_embeds=A_ , image=A_ , original_image=A_ , generator=A_ , num_inference_steps=2 , output_type='''np''' , )
lowerCAmelCase_ : Union[str, Any] = output.images[0]
assert image.shape == (2_5_6, 2_5_6, 3)
lowerCAmelCase_ : str = torch.cuda.max_memory_allocated()
assert mem_bytes < 4 * 1_0**9
lowerCAmelCase_ : Tuple = load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_img2img_superresolution_stage_II.npy''')
assert_mean_pixel_difference(A_ , A_)
    def _test_if_inpainting(self, pipe_1, pipe_2, prompt_embeds, negative_prompt_embeds):
# pipeline 1
_start_torch_memory_measurement()
lowerCAmelCase_ : Union[str, Any] = floats_tensor((1, 3, 6_4, 6_4) , rng=random.Random(0)).to(A_)
lowerCAmelCase_ : int = floats_tensor((1, 3, 6_4, 6_4) , rng=random.Random(1)).to(A_)
lowerCAmelCase_ : Optional[Any] = torch.Generator(device='''cpu''').manual_seed(0)
lowerCAmelCase_ : Any = pipe_a(
prompt_embeds=A_ , negative_prompt_embeds=A_ , image=A_ , mask_image=A_ , num_inference_steps=2 , generator=A_ , output_type='''np''' , )
lowerCAmelCase_ : List[Any] = output.images[0]
assert image.shape == (6_4, 6_4, 3)
lowerCAmelCase_ : str = torch.cuda.max_memory_allocated()
assert mem_bytes < 1_0 * 1_0**9
lowerCAmelCase_ : int = load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_inpainting.npy''')
assert_mean_pixel_difference(A_ , A_)
# pipeline 2
_start_torch_memory_measurement()
lowerCAmelCase_ : Optional[Any] = torch.Generator(device='''cpu''').manual_seed(0)
lowerCAmelCase_ : Optional[Any] = floats_tensor((1, 3, 6_4, 6_4) , rng=random.Random(0)).to(A_)
lowerCAmelCase_ : Union[str, Any] = floats_tensor((1, 3, 2_5_6, 2_5_6) , rng=random.Random(0)).to(A_)
lowerCAmelCase_ : Optional[Any] = floats_tensor((1, 3, 2_5_6, 2_5_6) , rng=random.Random(1)).to(A_)
lowerCAmelCase_ : int = pipe_a(
prompt_embeds=A_ , negative_prompt_embeds=A_ , image=A_ , mask_image=A_ , original_image=A_ , generator=A_ , num_inference_steps=2 , output_type='''np''' , )
lowerCAmelCase_ : Tuple = output.images[0]
assert image.shape == (2_5_6, 2_5_6, 3)
lowerCAmelCase_ : int = torch.cuda.max_memory_allocated()
assert mem_bytes < 4 * 1_0**9
lowerCAmelCase_ : Any = load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_inpainting_superresolution_stage_II.npy''')
assert_mean_pixel_difference(A_ , A_)
def _start_torch_memory_measurement():
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
| 103 |
import copy
from typing import Dict, List, Optional
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
MASK2FORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'''facebook/mask2former-swin-small-coco-instance''': (
'''https://huggingface.co/facebook/mask2former-swin-small-coco-instance/blob/main/config.json'''
)
# See all Mask2Former models at https://huggingface.co/models?filter=mask2former
}
logger = logging.get_logger(__name__)
class Mask2FormerConfig(PretrainedConfig):
    model_type = "mask2former"
    backbones_supported = ["swin"]
    attribute_map = {"hidden_size": "hidden_dim"}

    def __init__(
        self, backbone_config: Optional[Dict] = None, feature_size: int = 256, mask_feature_size: int = 256,
        hidden_dim: int = 256, encoder_feedforward_dim: int = 1024, activation_function: str = "relu",
        encoder_layers: int = 6, decoder_layers: int = 10, num_attention_heads: int = 8, dropout: float = 0.0,
        dim_feedforward: int = 2048, pre_norm: bool = False, enforce_input_projection: bool = False,
        common_stride: int = 4, ignore_value: int = 255, num_queries: int = 100, no_object_weight: float = 0.1,
        class_weight: float = 2.0, mask_weight: float = 5.0, dice_weight: float = 5.0,
        train_num_points: int = 12544, oversample_ratio: float = 3.0, importance_sample_ratio: float = 0.75,
        init_std: float = 0.02, init_xavier_std: float = 1.0, use_auxiliary_loss: bool = True,
        feature_strides: List[int] = [4, 8, 16, 32], output_auxiliary_logits: bool = None, **kwargs,
    ):
        if backbone_config is None:
            logger.info("`backbone_config` is `None`. Initializing the config with the default `Swin` backbone.")
            backbone_config = CONFIG_MAPPING["swin"](
                image_size=224, in_channels=3, patch_size=4, embed_dim=96, depths=[2, 2, 18, 2], num_heads=[3, 6, 12, 24], window_size=7, drop_path_rate=0.3, use_absolute_embeddings=False, out_features=["stage1", "stage2", "stage3", "stage4"],
            )

        if isinstance(backbone_config, dict):
            backbone_model_type = backbone_config.pop("model_type")
            config_class = CONFIG_MAPPING[backbone_model_type]
            backbone_config = config_class.from_dict(backbone_config)

        # verify that the backbone is supported
        if backbone_config.model_type not in self.backbones_supported:
            logger.warning_once(
                f"Backbone {backbone_config.model_type} is not a supported model and may not be compatible with Mask2Former. "
                f"Supported model types: {','.join(self.backbones_supported)}"
            )

        self.backbone_config = backbone_config
        self.feature_size = feature_size
        self.mask_feature_size = mask_feature_size
        self.hidden_dim = hidden_dim
        self.encoder_feedforward_dim = encoder_feedforward_dim
        self.activation_function = activation_function
        self.encoder_layers = encoder_layers
        self.decoder_layers = decoder_layers
        self.num_attention_heads = num_attention_heads
        self.dropout = dropout
        self.dim_feedforward = dim_feedforward
        self.pre_norm = pre_norm
        self.enforce_input_projection = enforce_input_projection
        self.common_stride = common_stride
        self.ignore_value = ignore_value
        self.num_queries = num_queries
        self.no_object_weight = no_object_weight
        self.class_weight = class_weight
        self.mask_weight = mask_weight
        self.dice_weight = dice_weight
        self.train_num_points = train_num_points
        self.oversample_ratio = oversample_ratio
        self.importance_sample_ratio = importance_sample_ratio
        self.init_std = init_std
        self.init_xavier_std = init_xavier_std
        self.use_auxiliary_loss = use_auxiliary_loss
        self.feature_strides = feature_strides
        self.output_auxiliary_logits = output_auxiliary_logits
        self.num_hidden_layers = decoder_layers

        super().__init__(**kwargs)

    @classmethod
    def from_backbone_config(cls, backbone_config: PretrainedConfig, **kwargs):
        return cls(
            backbone_config=backbone_config,
            **kwargs,
        )

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        output["backbone_config"] = self.backbone_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
| 103 | 1 |
'''simple docstring'''
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import YolosConfig, YolosForObjectDetection, YolosImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
lowerCamelCase_ = logging.get_logger(__name__)
def get_yolos_config(yolos_name: str) -> YolosConfig:
    config = YolosConfig()

    # size of the architecture
    if "yolos_ti" in yolos_name:
        config.hidden_size = 192
        config.intermediate_size = 768
        config.num_hidden_layers = 12
        config.num_attention_heads = 3
        config.image_size = [800, 1333]
        config.use_mid_position_embeddings = False
    elif yolos_name == "yolos_s_dWr":
        config.hidden_size = 330
        config.num_hidden_layers = 14
        config.num_attention_heads = 6
        config.intermediate_size = 1320
    elif "yolos_s" in yolos_name:
        config.hidden_size = 384
        config.intermediate_size = 1536
        config.num_hidden_layers = 12
        config.num_attention_heads = 6
    elif "yolos_b" in yolos_name:
        config.image_size = [800, 1344]

    config.num_labels = 91
    repo_id = "huggingface/label-files"
    filename = "coco-detection-id2label.json"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}

    return config
def read_in_q_k_v(state_dict: dict, config: YolosConfig, base_model: bool = False):
    for i in range(config.num_hidden_layers):
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"blocks.{i}.attn.qkv.weight")
        in_proj_bias = state_dict.pop(f"blocks.{i}.attn.qkv.bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[: config.hidden_size, :]
        state_dict[f"encoder.layer.{i}.attention.attention.query.bias"] = in_proj_bias[: config.hidden_size]
        state_dict[f"encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[f"encoder.layer.{i}.attention.attention.key.bias"] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[f"encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[-config.hidden_size :, :]
        state_dict[f"encoder.layer.{i}.attention.attention.value.bias"] = in_proj_bias[-config.hidden_size :]
def rename_key(name: str) -> str:
    if "backbone" in name:
        name = name.replace("backbone", "vit")
    if "cls_token" in name:
        name = name.replace("cls_token", "embeddings.cls_token")
    if "det_token" in name:
        name = name.replace("det_token", "embeddings.detection_tokens")
    if "mid_pos_embed" in name:
        name = name.replace("mid_pos_embed", "encoder.mid_position_embeddings")
    if "pos_embed" in name:
        name = name.replace("pos_embed", "embeddings.position_embeddings")
    if "patch_embed.proj" in name:
        name = name.replace("patch_embed.proj", "embeddings.patch_embeddings.projection")
    if "blocks" in name:
        name = name.replace("blocks", "encoder.layer")
    if "attn.proj" in name:
        name = name.replace("attn.proj", "attention.output.dense")
    if "attn" in name:
        name = name.replace("attn", "attention.self")
    if "norm1" in name:
        name = name.replace("norm1", "layernorm_before")
    if "norm2" in name:
        name = name.replace("norm2", "layernorm_after")
    if "mlp.fc1" in name:
        name = name.replace("mlp.fc1", "intermediate.dense")
    if "mlp.fc2" in name:
        name = name.replace("mlp.fc2", "output.dense")
    if "class_embed" in name:
        name = name.replace("class_embed", "class_labels_classifier")
    if "bbox_embed" in name:
        name = name.replace("bbox_embed", "bbox_predictor")
    if "vit.norm" in name:
        name = name.replace("vit.norm", "vit.layernorm")

    return name
def convert_state_dict(orig_state_dict: dict, model: YolosForObjectDetection) -> dict:
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)

        if "qkv" in key:
            key_split = key.split(".")
            layer_num = int(key_split[2])
            dim = model.vit.encoder.layer[layer_num].attention.attention.all_head_size
            if "weight" in key:
                orig_state_dict[f"vit.encoder.layer.{layer_num}.attention.attention.query.weight"] = val[:dim, :]
                orig_state_dict[f"vit.encoder.layer.{layer_num}.attention.attention.key.weight"] = val[
                    dim : dim * 2, :
                ]
                orig_state_dict[f"vit.encoder.layer.{layer_num}.attention.attention.value.weight"] = val[-dim:, :]
            else:
                orig_state_dict[f"vit.encoder.layer.{layer_num}.attention.attention.query.bias"] = val[:dim]
                orig_state_dict[f"vit.encoder.layer.{layer_num}.attention.attention.key.bias"] = val[dim : dim * 2]
                orig_state_dict[f"vit.encoder.layer.{layer_num}.attention.attention.value.bias"] = val[-dim:]
        else:
            orig_state_dict[rename_key(key)] = val

    return orig_state_dict
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_yolos_checkpoint(yolos_name, checkpoint_path, pytorch_dump_folder_path, push_to_hub=False):
    config = get_yolos_config(yolos_name)

    # load original state_dict
    state_dict = torch.load(checkpoint_path, map_location="cpu")["model"]

    # load 🤗 model
    model = YolosForObjectDetection(config)
    model.eval()
    new_state_dict = convert_state_dict(state_dict, model)
    model.load_state_dict(new_state_dict)

    # Check outputs on an image, prepared by YolosImageProcessor
    size = 800 if yolos_name != "yolos_ti" else 512
    image_processor = YolosImageProcessor(format="coco_detection", size=size)
    encoding = image_processor(images=prepare_img(), return_tensors="pt")
    outputs = model(**encoding)
    logits, pred_boxes = outputs.logits, outputs.pred_boxes

    expected_slice_logits, expected_slice_boxes = None, None
if yolos_name == "yolos_ti":
        expected_slice_logits = torch.tensor(
[[-39.5022, -11.9820, -17.6888], [-29.9574, -9.9769, -17.7691], [-42.3281, -20.7200, -30.6294]] )
        expected_slice_boxes = torch.tensor(
[[0.4021, 0.0836, 0.7979], [0.0184, 0.2609, 0.0364], [0.1781, 0.2004, 0.2095]] )
elif yolos_name == "yolos_s_200_pre":
        expected_slice_logits = torch.tensor(
[[-24.0248, -10.3024, -14.8290], [-42.0392, -16.8200, -27.4334], [-27.2743, -11.8154, -18.7148]] )
        expected_slice_boxes = torch.tensor(
[[0.2559, 0.5455, 0.4706], [0.2989, 0.7279, 0.1875], [0.7732, 0.4017, 0.4462]] )
elif yolos_name == "yolos_s_300_pre":
        expected_slice_logits = torch.tensor(
[[-36.2220, -14.4385, -23.5457], [-35.6970, -14.7583, -21.3935], [-31.5939, -13.6042, -16.8049]] )
        expected_slice_boxes = torch.tensor(
[[0.7614, 0.2316, 0.4728], [0.7168, 0.4495, 0.3855], [0.4996, 0.1466, 0.9996]] )
elif yolos_name == "yolos_s_dWr":
        expected_slice_logits = torch.tensor(
[[-42.8668, -24.1049, -41.1690], [-34.7456, -14.1274, -24.9194], [-33.7898, -12.1946, -25.6495]] )
        expected_slice_boxes = torch.tensor(
[[0.5587, 0.2773, 0.0605], [0.5004, 0.3014, 0.9994], [0.4999, 0.1548, 0.9994]] )
elif yolos_name == "yolos_base":
        expected_slice_logits = torch.tensor(
[[-40.6064, -24.3084, -32.6447], [-55.1990, -30.7719, -35.5877], [-51.4311, -33.3507, -35.6462]] )
        expected_slice_boxes = torch.tensor(
[[0.5555, 0.2794, 0.0655], [0.9049, 0.2664, 0.1894], [0.9183, 0.1984, 0.1635]] )
else:
raise ValueError(F'''Unknown yolos_name: {yolos_name}''' )
    assert torch.allclose(logits[0, :3, :3], expected_slice_logits, atol=1e-4)
    assert torch.allclose(pred_boxes[0, :3, :3], expected_slice_boxes, atol=1e-4)

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f"Saving model {yolos_name} to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        model_mapping = {
            "yolos_ti": "yolos-tiny",
            "yolos_s_200_pre": "yolos-small",
            "yolos_s_300_pre": "yolos-small-300",
            "yolos_s_dWr": "yolos-small-dwr",
            "yolos_base": "yolos-base",
        }

        print("Pushing to the hub...")
        model_name = model_mapping[yolos_name]
        image_processor.push_to_hub(model_name, organization="hustvl")
        model.push_to_hub(model_name, organization="hustvl")
if __name__ == "__main__":
lowerCamelCase_ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--yolos_name''',
default='''yolos_s_200_pre''',
type=str,
help=(
'''Name of the YOLOS model you\'d like to convert. Should be one of \'yolos_ti\', \'yolos_s_200_pre\','''
''' \'yolos_s_300_pre\', \'yolos_s_dWr\', \'yolos_base\'.'''
),
)
parser.add_argument(
'''--checkpoint_path''', default=None, type=str, help='''Path to the original state dict (.pth file).'''
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.'''
)
parser.add_argument(
'''--push_to_hub''', action='''store_true''', help='''Whether or not to push the converted model to the 🤗 hub.'''
)
lowerCamelCase_ = parser.parse_args()
convert_yolos_checkpoint(args.yolos_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub)
| 174 |
'''simple docstring'''
from collections import defaultdict
from math import gcd
def solution(limit: int = 1500000) -> int:
    """Count perimeters <= limit that admit exactly one integer right triangle (Project Euler 75)."""
    frequencies: defaultdict = defaultdict(int)
    euclid_m = 2
    while 2 * euclid_m * (euclid_m + 1) <= limit:
        for euclid_n in range((euclid_m % 2) + 1, euclid_m, 2):
            if gcd(euclid_m, euclid_n) > 1:
                continue
            primitive_perimeter = 2 * euclid_m * (euclid_m + euclid_n)
            for perimeter in range(primitive_perimeter, limit + 1, primitive_perimeter):
                frequencies[perimeter] += 1
        euclid_m += 1
    return sum(1 for frequency in frequencies.values() if frequency == 1)
if __name__ == "__main__":
print(F"""{solution() = }""")
| 174 | 1 |
import inspect
import unittest
from transformers import MobileNetVaConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MobileNetVaForImageClassification, MobileNetVaForSemanticSegmentation, MobileNetVaModel
from transformers.models.mobilenet_va.modeling_mobilenet_va import MOBILENET_V2_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import MobileNetVaImageProcessor
class MobileNetVaConfigTester(ConfigTester):
    def create_and_test_config_common_properties(self):
        config = self.config_class(**self.inputs_dict)
        self.parent.assertTrue(hasattr(config, "tf_padding"))
        self.parent.assertTrue(hasattr(config, "depth_multiplier"))
class snake_case_ :
    def __init__(
        self, parent, batch_size=13, num_channels=3, image_size=32, depth_multiplier=0.25,
        depth_divisible_by=8, min_depth=8, expand_ratio=6, output_stride=32,
        first_layer_is_expansion=True, finegrained_output=True, tf_padding=True, hidden_act="relu6",
        last_hidden_size=1280, classifier_dropout_prob=0.1, initializer_range=0.02,
        is_training=True, use_labels=True, num_labels=10, scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.depth_multiplier = depth_multiplier
        self.depth_divisible_by = depth_divisible_by
        self.min_depth = min_depth
        self.expand_ratio = expand_ratio
        self.tf_padding = tf_padding
        self.output_stride = output_stride
        self.first_layer_is_expansion = first_layer_is_expansion
        self.finegrained_output = finegrained_output
        self.hidden_act = hidden_act
        self.last_hidden_size = last_hidden_size if finegrained_output else int(last_hidden_size * depth_multiplier)
        self.classifier_dropout_prob = classifier_dropout_prob
        self.use_labels = use_labels
        self.is_training = is_training
        self.num_labels = num_labels
        self.initializer_range = initializer_range
        self.scope = scope
    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        pixel_labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)
            pixel_labels = ids_tensor([self.batch_size, self.image_size, self.image_size], self.num_labels)

        config = self.get_config()

        return config, pixel_values, labels, pixel_labels
    def get_config(self):
return MobileNetVaConfig(
num_channels=self.num_channels , image_size=self.image_size , depth_multiplier=self.depth_multiplier , depth_divisible_by=self.depth_divisible_by , min_depth=self.min_depth , expand_ratio=self.expand_ratio , output_stride=self.output_stride , first_layer_is_expansion=self.first_layer_is_expansion , finegrained_output=self.finegrained_output , hidden_act=self.hidden_act , tf_padding=self.tf_padding , classifier_dropout_prob=self.classifier_dropout_prob , initializer_range=self.initializer_range , )
def __UpperCamelCase ( self : Tuple , lowercase_ : int , lowercase_ : Optional[Any] , lowercase_ : Tuple , lowercase_ : str ) -> Optional[int]:
lowercase__ : List[Any] = MobileNetVaModel(config=lowercase_ )
model.to(lowercase_ )
model.eval()
lowercase__ : Union[str, Any] = model(lowercase_ )
self.parent.assertEqual(
result.last_hidden_state.shape , (
self.batch_size,
self.last_hidden_size,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
) , )
self.parent.assertEqual(
result.pooler_output.shape , (self.batch_size, self.last_hidden_size) , )
def __UpperCamelCase ( self : Optional[int] , lowercase_ : Optional[int] , lowercase_ : str , lowercase_ : List[str] , lowercase_ : Tuple ) -> List[Any]:
lowercase__ : Tuple = self.num_labels
lowercase__ : List[str] = MobileNetVaForImageClassification(lowercase_ )
model.to(lowercase_ )
model.eval()
lowercase__ : Optional[int] = model(lowercase_ , labels=lowercase_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def __UpperCamelCase ( self : Any , lowercase_ : Any , lowercase_ : List[Any] , lowercase_ : List[str] , lowercase_ : List[str] ) -> Union[str, Any]:
lowercase__ : Any = self.num_labels
lowercase__ : List[str] = MobileNetVaForSemanticSegmentation(lowercase_ )
model.to(lowercase_ )
model.eval()
lowercase__ : str = model(lowercase_ )
self.parent.assertEqual(
result.logits.shape , (
self.batch_size,
self.num_labels,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
) , )
lowercase__ : Tuple = model(lowercase_ , labels=lowercase_ )
self.parent.assertEqual(
result.logits.shape , (
self.batch_size,
self.num_labels,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
) , )
def __UpperCamelCase ( self : str ) -> str:
lowercase__ : Tuple = self.prepare_config_and_inputs()
lowercase__ , lowercase__ , lowercase__ , lowercase__ : Optional[Any] = config_and_inputs
lowercase__ : int = {"pixel_values": pixel_values}
return config, inputs_dict
@require_torch
class MobileNetVaModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (MobileNetVaModel, MobileNetVaForImageClassification, MobileNetVaForSemanticSegmentation)
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": MobileNetVaModel,
            "image-classification": MobileNetVaForImageClassification,
            "image-segmentation": MobileNetVaForSemanticSegmentation,
        }
        if is_torch_available()
        else {}
    )

    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False
    def setUp(self):
        self.model_tester = MobileNetVaModelTester(self)
        self.config_tester = MobileNetVaConfigTester(self, config_class=MobileNetVaConfig, has_text_modality=False)

    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason="MobileNetV2 does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="MobileNetV2 does not support input and output embeddings")
    def test_model_common_attributes(self):
        pass

    @unittest.skip(reason="MobileNetV2 does not output attentions")
    def test_attention_outputs(self):
        pass

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()

            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            hidden_states = outputs.hidden_states

            expected_num_stages = 16
            self.assertEqual(len(hidden_states), expected_num_stages)

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            check_hidden_states_output(inputs_dict, config, model_class)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    def test_for_semantic_segmentation(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_semantic_segmentation(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in MOBILENET_V2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = MobileNetVaModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_torch
@require_vision
class MobileNetVaModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return (
            MobileNetVaImageProcessor.from_pretrained("google/mobilenet_v2_1.0_224") if is_vision_available() else None
        )

    @slow
    def test_inference_image_classification_head(self):
        model = MobileNetVaForImageClassification.from_pretrained("google/mobilenet_v2_1.0_224").to(torch_device)
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 1001))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor([0.2445, -1.1993, 0.1905]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))

    @slow
    def test_inference_semantic_segmentation(self):
        model = MobileNetVaForSemanticSegmentation.from_pretrained("google/deeplabv3_mobilenet_v2_1.0_513")
        model = model.to(torch_device)

        image_processor = MobileNetVaImageProcessor.from_pretrained("google/deeplabv3_mobilenet_v2_1.0_513")

        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
        logits = outputs.logits

        # verify the logits
        expected_shape = torch.Size((1, 21, 65, 65))
        self.assertEqual(logits.shape, expected_shape)

        expected_slice = torch.tensor(
            [
                [[17.5790, 17.7581, 18.3355], [18.3257, 18.4230, 18.8973], [18.6169, 18.8650, 19.2187]],
                [[-2.1595, -2.0977, -2.3741], [-2.4226, -2.3028, -2.6835], [-2.7819, -2.5991, -2.7706]],
                [[4.2058, 4.8317, 4.7638], [4.4136, 5.0361, 4.9383], [4.5028, 4.9644, 4.8734]],
            ],
            device=torch_device,
        )

        self.assertTrue(torch.allclose(logits[0, :3, :3, :3], expected_slice, atol=1e-4))
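# ---------------------------------------------------------------------------
# Illustrative usage sketch (editorial addition, not part of the original test
# file): classifying one image, condensed from the integration test above.
# Assumes network access to the "google/mobilenet_v2_1.0_224" checkpoint and
# the vision extras installed.
def classify_example_image():
    image_processor = MobileNetVaImageProcessor.from_pretrained("google/mobilenet_v2_1.0_224")
    model = MobileNetVaForImageClassification.from_pretrained("google/mobilenet_v2_1.0_224")
    inputs = image_processor(images=prepare_img(), return_tensors="pt")
    logits = model(**inputs).logits  # shape (1, 1001): ImageNet classes plus background
    return model.config.id2label[logits.argmax(-1).item()]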
| 87 |
import operator as op
SCALER_NAME = "scaler.pt"
MODEL_NAME = "pytorch_model"
RNG_STATE_NAME = "random_states"
OPTIMIZER_NAME = "optimizer"
SCHEDULER_NAME = "scheduler"
WEIGHTS_NAME = "pytorch_model.bin"
WEIGHTS_INDEX_NAME = "pytorch_model.bin.index.json"
SAFE_WEIGHTS_NAME = "model.safetensors"
SAFE_WEIGHTS_INDEX_NAME = "model.safetensors.index.json"
SAGEMAKER_PYTORCH_VERSION = "1.10.2"
SAGEMAKER_PYTHON_VERSION = "py38"
SAGEMAKER_TRANSFORMERS_VERSION = "4.17.0"
SAGEMAKER_PARALLEL_EC2_INSTANCES = ["ml.p3.16xlarge", "ml.p3dn.24xlarge", "ml.p4dn.24xlarge"]
FSDP_SHARDING_STRATEGY = ["FULL_SHARD", "SHARD_GRAD_OP", "NO_SHARD", "HYBRID_SHARD", "HYBRID_SHARD_ZERO2"]
FSDP_AUTO_WRAP_POLICY = ["TRANSFORMER_BASED_WRAP", "SIZE_BASED_WRAP", "NO_WRAP"]
FSDP_BACKWARD_PREFETCH = ["BACKWARD_PRE", "BACKWARD_POST", "NO_PREFETCH"]
FSDP_STATE_DICT_TYPE = ["FULL_STATE_DICT", "LOCAL_STATE_DICT", "SHARDED_STATE_DICT"]
FSDP_PYTORCH_VERSION = "2.0.1"
DEEPSPEED_MULTINODE_LAUNCHERS = ["pdsh", "standard", "openmpi", "mvapich"]
TORCH_DYNAMO_MODES = ["default", "reduce-overhead", "max-autotune"]
STR_OPERATION_TO_FUNC = {">": op.gt, ">=": op.ge, "==": op.eq, "!=": op.ne, "<=": op.le, "<": op.lt}
# These are the args for `torch.distributed.launch` for pytorch < 1.9
TORCH_LAUNCH_PARAMS = [
'''nnodes''',
'''nproc_per_node''',
'''rdzv_backend''',
'''rdzv_endpoint''',
'''rdzv_id''',
'''rdzv_conf''',
'''standalone''',
'''max_restarts''',
'''monitor_interval''',
'''start_method''',
'''role''',
'''module''',
'''m''',
'''no_python''',
'''run_path''',
'''log_dir''',
'''r''',
'''redirects''',
'''t''',
'''tee''',
'''node_rank''',
'''master_addr''',
'''master_port''',
]
CUDA_DISTRIBUTED_TYPES = ["DEEPSPEED", "MULTI_GPU", "FSDP", "MEGATRON_LM"]
a__ = ['''DEEPSPEED''', '''MULTI_XPU''', '''FSDP''']
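# ---------------------------------------------------------------------------
# Illustrative usage sketch (editorial addition): an operator map like
# STR_OPERATION_TO_FUNC above is typically used to turn a string such as ">="
# into a callable for version gating. `compare_versions` is a hypothetical
# helper name, not part of the constants module itself.
def compare_versions(current: str, operation: str, reference: str) -> bool:
    from packaging import version

    # Look up the comparison function and apply it to parsed version objects.
    return STR_OPERATION_TO_FUNC[operation](version.parse(current), version.parse(reference))


assert compare_versions("2.0.1", ">=", "1.10.2")  # e.g. gate an FSDP feature on torch >= 1.10.2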
| 235 | 0 |
"""simple docstring"""
from queue import PriorityQueue
from typing import Any
import numpy as np
def pass_and_relaxation(
    graph: dict,
    v: str,
    visited_forward: set,
    visited_backward: set,
    cst_fwd: dict,
    cst_bwd: dict,
    queue: PriorityQueue,
    parent: dict,
    shortest_distance: float | int,
) -> float | int:
    """Relax the edges of v in one search direction of bidirectional Dijkstra."""
    for nxt, d in graph[v]:
        if nxt in visited_forward:
            continue
        old_cost_f = cst_fwd.get(nxt, np.inf)
        new_cost_f = cst_fwd[v] + d
        if new_cost_f < old_cost_f:
            queue.put((new_cost_f, nxt))
            cst_fwd[nxt] = new_cost_f
            parent[nxt] = v
        if nxt in visited_backward:
            if cst_fwd[v] + d + cst_bwd[nxt] < shortest_distance:
                shortest_distance = cst_fwd[v] + d + cst_bwd[nxt]
    return shortest_distance
def bidirectional_dij(source: str, destination: str, graph_forward: dict, graph_backward: dict) -> int:
    """Bidirectional Dijkstra; returns the shortest distance, or -1 if unreachable."""
    shortest_path_distance = -1

    visited_forward = set()
    visited_backward = set()
    cst_fwd = {source: 0}
    cst_bwd = {destination: 0}
    parent_forward = {source: None}
    parent_backward = {destination: None}
    queue_forward: PriorityQueue[Any] = PriorityQueue()
    queue_backward: PriorityQueue[Any] = PriorityQueue()

    shortest_distance = np.inf

    queue_forward.put((0, source))
    queue_backward.put((0, destination))

    if source == destination:
        return 0

    while not queue_forward.empty() and not queue_backward.empty():
        _, v_fwd = queue_forward.get()
        visited_forward.add(v_fwd)

        _, v_bwd = queue_backward.get()
        visited_backward.add(v_bwd)

        shortest_distance = pass_and_relaxation(
            graph_forward, v_fwd, visited_forward, visited_backward,
            cst_fwd, cst_bwd, queue_forward, parent_forward, shortest_distance,
        )

        shortest_distance = pass_and_relaxation(
            graph_backward, v_bwd, visited_backward, visited_forward,
            cst_bwd, cst_fwd, queue_backward, parent_backward, shortest_distance,
        )

        if cst_fwd[v_fwd] + cst_bwd[v_bwd] >= shortest_distance:
            break

    if shortest_distance != np.inf:
        shortest_path_distance = shortest_distance
    return shortest_path_distance
graph_fwd = {
"""B""": [["""C""", 1]],
"""C""": [["""D""", 1]],
"""D""": [["""F""", 1]],
"""E""": [["""B""", 1], ["""G""", 2]],
"""F""": [],
"""G""": [["""F""", 1]],
}
graph_bwd = {
"""B""": [["""E""", 1]],
"""C""": [["""B""", 1]],
"""D""": [["""C""", 1]],
"""F""": [["""D""", 1], ["""G""", 1]],
"""E""": [[None, np.inf]],
"""G""": [["""E""", 2]],
}
if __name__ == "__main__":
import doctest
doctest.testmod()
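# ---------------------------------------------------------------------------
# Illustrative check (editorial addition): on the sample graphs above the best
# E -> F route is E -> G -> F (cost 2 + 1 = 3), beating E -> B -> C -> D -> F
# (cost 4); F -> E is unreachable, so the -1 sentinel is returned.
assert bidirectional_dij("E", "F", graph_fwd, graph_bwd) == 3
assert bidirectional_dij("F", "E", graph_fwd, graph_bwd) == -1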
| 161 |
"""simple docstring"""
import json
from typing import TYPE_CHECKING, List, Optional, Tuple
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_blenderbot import BlenderbotTokenizer
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {
"""vocab_file""": """vocab.json""",
"""merges_file""": """merges.txt""",
"""tokenizer_config_file""": """tokenizer_config.json""",
}
PRETRAINED_VOCAB_FILES_MAP = {
"""vocab_file""": {"""facebook/blenderbot-3B""": """https://huggingface.co/facebook/blenderbot-3B/resolve/main/vocab.json"""},
"""merges_file""": {"""facebook/blenderbot-3B""": """https://huggingface.co/facebook/blenderbot-3B/resolve/main/merges.txt"""},
"""tokenizer_config_file""": {
"""facebook/blenderbot-3B""": """https://huggingface.co/facebook/blenderbot-3B/resolve/main/tokenizer_config.json"""
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {"facebook/blenderbot-3B": 128}
class BlenderbotTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = BlenderbotTokenizer
    def __init__(
        self,
        vocab_file=None,
        merges_file=None,
        tokenizer_file=None,
        errors="replace",
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        add_prefix_space=False,
        trim_offsets=True,
        **kwargs,
    ):
        super().__init__(
            vocab_file, merges_file, tokenizer_file=tokenizer_file, errors=errors, bos_token=bos_token,
            eos_token=eos_token, sep_token=sep_token, cls_token=cls_token, unk_token=unk_token,
            pad_token=pad_token, mask_token=mask_token, add_prefix_space=add_prefix_space,
            trim_offsets=trim_offsets, **kwargs,
        )
        pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__())
        if pre_tok_state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
            pre_tok_class = getattr(pre_tokenizers, pre_tok_state.pop("type"))
            pre_tok_state["add_prefix_space"] = add_prefix_space
            self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state)
        self.add_prefix_space = add_prefix_space

        tokenizer_component = "post_processor"
        tokenizer_component_instance = getattr(self.backend_tokenizer, tokenizer_component, None)
        if tokenizer_component_instance:
            state = json.loads(tokenizer_component_instance.__getstate__())

            # The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class`
            if "sep" in state:
                state["sep"] = tuple(state["sep"])
            if "cls" in state:
                state["cls"] = tuple(state["cls"])

            changes_to_apply = False
            if state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
                state["add_prefix_space"] = add_prefix_space
                changes_to_apply = True
            if state.get("trim_offsets", trim_offsets) != trim_offsets:
                state["trim_offsets"] = trim_offsets
                changes_to_apply = True

            if changes_to_apply:
                component_class = getattr(processors, state.pop("type"))
                new_value = component_class(**state)
                setattr(self.backend_tokenizer, tokenizer_component, new_value)
    @property
    # Copied from transformers.models.roberta.tokenization_roberta_fast.RobertaTokenizerFast.mask_token with Roberta->Blenderbot, RoBERTa->Blenderbot
    def mask_token(self) -> str:
        if self._mask_token is None:
            if self.verbose:
                logger.error("Using mask_token, but it is not set yet.")
            return None
        return str(self._mask_token)
    @mask_token.setter
    def mask_token(self, value):
        # The mask token behaves like a normal word, i.e. includes the space before it
        value = AddedToken(value, lstrip=True, rstrip=False) if isinstance(value, str) else value
        self._mask_token = value
    def _batch_encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)
        assert self.add_prefix_space or not is_split_into_words, (
            f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
            "to use it with pretokenized inputs."
        )
        return super()._batch_encode_plus(*args, **kwargs)
    def _encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)
        assert self.add_prefix_space or not is_split_into_words, (
            f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
            "to use it with pretokenized inputs."
        )
        return super()._encode_plus(*args, **kwargs)
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]
    def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None):
        return token_ids_0 + [self.eos_token_id]
    def _build_conversation_input_ids(self, conversation: "Conversation") -> List[int]:
        inputs = []
        for is_user, text in conversation.iter_texts():
            if is_user:
                # We need to space prefix as it's being done within blenderbot
                inputs.append(" " + text)
            else:
                # Generated responses should contain them already.
                inputs.append(text)

        full_string = " ".join(inputs)
        input_ids = self.encode(full_string)
        if len(input_ids) > self.model_max_length:
            input_ids = input_ids[-self.model_max_length :]
            logger.warning(f"Trimmed input from conversation as it was longer than {self.model_max_length} tokens.")
        return input_ids
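# ---------------------------------------------------------------------------
# Illustrative usage sketch (editorial addition): build_inputs_with_special_tokens
# above only appends the EOS token, so the last id of every encoding is </s>.
# Assumes network access to the "facebook/blenderbot-3B" tokenizer files.
def eos_is_last_token():
    tokenizer = BlenderbotTokenizerFast.from_pretrained("facebook/blenderbot-3B")
    encoding = tokenizer(" Hello, how are you?")
    return encoding["input_ids"][-1] == tokenizer.eos_token_id  # True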
| 161 | 1 |
"""simple docstring"""
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, List, Mapping, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import TensorType, logging
if TYPE_CHECKING:
from ...onnx.config import PatchingSpec
from ...tokenization_utils_base import PreTrainedTokenizerBase
logger = logging.get_logger(__name__)
LONGFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'''allenai/longformer-base-4096''': '''https://huggingface.co/allenai/longformer-base-4096/resolve/main/config.json''',
'''allenai/longformer-large-4096''': '''https://huggingface.co/allenai/longformer-large-4096/resolve/main/config.json''',
'''allenai/longformer-large-4096-finetuned-triviaqa''': (
'''https://huggingface.co/allenai/longformer-large-4096-finetuned-triviaqa/resolve/main/config.json'''
),
'''allenai/longformer-base-4096-extra.pos.embd.only''': (
'''https://huggingface.co/allenai/longformer-base-4096-extra.pos.embd.only/resolve/main/config.json'''
),
'''allenai/longformer-large-4096-extra.pos.embd.only''': (
'''https://huggingface.co/allenai/longformer-large-4096-extra.pos.embd.only/resolve/main/config.json'''
),
}
class LongformerConfig(PretrainedConfig):
    model_type = "longformer"
    def __init__(
        self,
        attention_window: Union[List[int], int] = 512,
        sep_token_id: int = 2,
        pad_token_id: int = 1,
        bos_token_id: int = 0,
        eos_token_id: int = 2,
        vocab_size: int = 30522,
        hidden_size: int = 768,
        num_hidden_layers: int = 12,
        num_attention_heads: int = 12,
        intermediate_size: int = 3072,
        hidden_act: str = "gelu",
        hidden_dropout_prob: float = 0.1,
        attention_probs_dropout_prob: float = 0.1,
        max_position_embeddings: int = 512,
        type_vocab_size: int = 2,
        initializer_range: float = 0.02,
        layer_norm_eps: float = 1e-12,
        onnx_export: bool = False,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, **kwargs)
        self.attention_window = attention_window
        self.sep_token_id = sep_token_id
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.onnx_export = onnx_export
class LongformerOnnxConfig(OnnxConfig):
    def __init__(self, config: "PretrainedConfig", task: str = "default", patching_specs: "List[PatchingSpec]" = None):
        super().__init__(config, task, patching_specs)
        config.onnx_export = True
@property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
return OrderedDict(
[
("input_ids", dynamic_axis),
("attention_mask", dynamic_axis),
("global_attention_mask", dynamic_axis),
] )
@property
    def outputs(self) -> Mapping[str, Mapping[int, str]]:
        outputs = super().outputs
        if self.task == "default":
            outputs["pooler_output"] = {0: "batch"}
        return outputs
@property
    def atol_for_validation(self) -> float:
        return 1e-4
@property
    def default_onnx_opset(self) -> int:
        # needs to be >= 14 to support tril operator
        return max(super().default_onnx_opset, 14)
    def generate_dummy_inputs(
        self,
        preprocessor: "PreTrainedTokenizerBase",
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
    ) -> Mapping[str, Any]:
        inputs = super().generate_dummy_inputs(
            preprocessor=preprocessor, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
        )
import torch
# for some reason, replacing this code by inputs["global_attention_mask"] = torch.randint(2, inputs["input_ids"].shape, dtype=torch.int64)
# makes the export fail randomly
        global_attention_mask = torch.zeros_like(inputs["input_ids"])
        # make every second token global
        global_attention_mask[:, ::2] = 1
        inputs["global_attention_mask"] = global_attention_mask

        return inputs
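# ---------------------------------------------------------------------------
# Illustrative sketch (editorial addition): attention_window accepts either a
# single int shared by every layer or one value per layer, per the
# Union[List[int], int] annotation in __init__ above.
_example_config = LongformerConfig(
    attention_window=[64, 64, 128, 128, 256, 256, 512, 512, 512, 512, 512, 512]
)
assert len(_example_config.attention_window) == _example_config.num_hidden_layers  # 12 layers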
| 268 |
"""simple docstring"""
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import flax
import jax.numpy as jnp
from jax import random
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput
from .scheduling_utils_flax import FlaxSchedulerMixin
@flax.struct.dataclass
class KarrasVeSchedulerState:
    # setable values
    num_inference_steps: Optional[int] = None
    timesteps: Optional[jnp.ndarray] = None
    schedule: Optional[jnp.ndarray] = None  # sigma(t_i)

    @classmethod
    def create(cls):
        return cls()
@dataclass
class FlaxKarrasVeOutput(BaseOutput):
    prev_sample: jnp.ndarray
    derivative: jnp.ndarray
    state: KarrasVeSchedulerState
class FlaxKarrasVeScheduler(FlaxSchedulerMixin, ConfigMixin):
    @property
    def has_state(self):
        return True

    @register_to_config
    def __init__(
        self,
        sigma_min: float = 0.02,
        sigma_max: float = 100,
        s_noise: float = 1.007,
        s_churn: float = 80,
        s_min: float = 0.05,
        s_max: float = 50,
    ):
        pass

    def create_state(self):
        return KarrasVeSchedulerState.create()

    def set_timesteps(self, state: KarrasVeSchedulerState, num_inference_steps: int, shape: Tuple = ()) -> KarrasVeSchedulerState:
        timesteps = jnp.arange(0, num_inference_steps)[::-1].copy()
        schedule = [
            (
                self.config.sigma_max**2
                * (self.config.sigma_min**2 / self.config.sigma_max**2) ** (i / (num_inference_steps - 1))
            )
            for i in timesteps
        ]

        return state.replace(
            num_inference_steps=num_inference_steps,
            schedule=jnp.array(schedule, dtype=jnp.float32),
            timesteps=timesteps,
        )

    def add_noise_to_input(self, state: KarrasVeSchedulerState, sample: jnp.ndarray, sigma: float, key: random.KeyArray) -> Tuple[jnp.ndarray, float]:
        if self.config.s_min <= sigma <= self.config.s_max:
            gamma = min(self.config.s_churn / state.num_inference_steps, 2**0.5 - 1)
        else:
            gamma = 0

        # sample eps ~ N(0, S_noise^2 * I)
        key = random.split(key, num=1)
        eps = self.config.s_noise * random.normal(key=key, shape=sample.shape)
        sigma_hat = sigma + gamma * sigma
        sample_hat = sample + ((sigma_hat**2 - sigma**2) ** 0.5 * eps)

        return sample_hat, sigma_hat

    def step(self, state: KarrasVeSchedulerState, model_output: jnp.ndarray, sigma_hat: float, sigma_prev: float, sample_hat: jnp.ndarray, return_dict: bool = True) -> Union[FlaxKarrasVeOutput, Tuple]:
        pred_original_sample = sample_hat + sigma_hat * model_output
        derivative = (sample_hat - pred_original_sample) / sigma_hat
        sample_prev = sample_hat + (sigma_prev - sigma_hat) * derivative

        if not return_dict:
            return (sample_prev, derivative, state)

        return FlaxKarrasVeOutput(prev_sample=sample_prev, derivative=derivative, state=state)

    def step_correct(self, state: KarrasVeSchedulerState, model_output: jnp.ndarray, sigma_hat: float, sigma_prev: float, sample_hat: jnp.ndarray, sample_prev: jnp.ndarray, derivative: jnp.ndarray, return_dict: bool = True) -> Union[FlaxKarrasVeOutput, Tuple]:
        pred_original_sample = sample_prev + sigma_prev * model_output
        derivative_corr = (sample_prev - pred_original_sample) / sigma_prev
        sample_prev = sample_hat + (sigma_prev - sigma_hat) * (0.5 * derivative + 0.5 * derivative_corr)

        if not return_dict:
            return (sample_prev, derivative, state)

        return FlaxKarrasVeOutput(prev_sample=sample_prev, derivative=derivative, state=state)

    def add_noise(self, state: KarrasVeSchedulerState, original_samples, noise, timesteps):
        raise NotImplementedError()
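# ---------------------------------------------------------------------------
# Illustrative sketch (editorial addition): the geometric sigma schedule that
# set_timesteps builds above, written out standalone for N = 10 steps with the
# default sigma_max = 100 and sigma_min = 0.02; sigma(t)^2 moves geometrically
# between sigma_max^2 and sigma_min^2 across the schedule.
_N = 10
_timesteps = jnp.arange(0, _N)[::-1]
_schedule = jnp.array([100.0**2 * (0.02**2 / 100.0**2) ** (i / (_N - 1)) for i in _timesteps])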
| 268 | 1 |
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {"configuration_focalnet": ["FOCALNET_PRETRAINED_CONFIG_ARCHIVE_MAP", "FocalNetConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_focalnet"] = [
'''FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''FocalNetForImageClassification''',
'''FocalNetForMaskedImageModeling''',
'''FocalNetBackbone''',
'''FocalNetModel''',
'''FocalNetPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_focalnet import FOCALNET_PRETRAINED_CONFIG_ARCHIVE_MAP, FocalNetConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_focalnet import (
FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST,
FocalNetBackbone,
FocalNetForImageClassification,
FocalNetForMaskedImageModeling,
FocalNetModel,
FocalNetPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
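# ---------------------------------------------------------------------------
# Illustrative sketch (editorial addition): _LazyModule defers the torch-heavy
# submodule import until an attribute is first touched, so importing the
# package stays cheap; the first attribute access below triggers the real
# import. Assumes transformers with torch installed.
def lazy_import_example():
    from transformers.models import focalnet  # cheap: only the lazy shim is created

    return focalnet.FocalNetConfig()  # first attribute access loads configuration_focalnet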
| 354 |
# Author: OMKAR PATHAK, Nwachukwu Chidiebere
# Use a Python dictionary to construct the graph.
from __future__ import annotations
from pprint import pformat
from typing import Generic, TypeVar
T = TypeVar("T")
class GraphAdjacencyList(Generic[T]):
def __init__( self , A = True ) -> None:
'''simple docstring'''
        self.adj_list: dict[T, list[T]] = {}  # dictionary of lists
        self.directed = directed
    def add_edge(self, source_vertex: T, destination_vertex: T) -> GraphAdjacencyList[T]:
'''simple docstring'''
if not self.directed: # For undirected graphs
# if both source vertex and destination vertex are both present in the
# adjacency list, add destination vertex to source vertex list of adjacent
# vertices and add source vertex to destination vertex list of adjacent
# vertices.
if source_vertex in self.adj_list and destination_vertex in self.adj_list:
                self.adj_list[source_vertex].append(destination_vertex)
                self.adj_list[destination_vertex].append(source_vertex)
# if only source vertex is present in adjacency list, add destination vertex
# to source vertex list of adjacent vertices, then create a new vertex with
# destination vertex as key and assign a list containing the source vertex
# as it's first adjacent vertex.
elif source_vertex in self.adj_list:
                self.adj_list[source_vertex].append(destination_vertex)
                self.adj_list[destination_vertex] = [source_vertex]
# if only destination vertex is present in adjacency list, add source vertex
# to destination vertex list of adjacent vertices, then create a new vertex
# with source vertex as key and assign a list containing the source vertex
# as it's first adjacent vertex.
elif destination_vertex in self.adj_list:
                self.adj_list[destination_vertex].append(source_vertex)
                self.adj_list[source_vertex] = [destination_vertex]
# if both source vertex and destination vertex are not present in adjacency
# list, create a new vertex with source vertex as key and assign a list
# containing the destination vertex as it's first adjacent vertex also
# create a new vertex with destination vertex as key and assign a list
# containing the source vertex as it's first adjacent vertex.
else:
                self.adj_list[source_vertex] = [destination_vertex]
                self.adj_list[destination_vertex] = [source_vertex]
else: # For directed graphs
# if both source vertex and destination vertex are present in adjacency
# list, add destination vertex to source vertex list of adjacent vertices.
if source_vertex in self.adj_list and destination_vertex in self.adj_list:
                self.adj_list[source_vertex].append(destination_vertex)
# if only source vertex is present in adjacency list, add destination
# vertex to source vertex list of adjacent vertices and create a new vertex
# with destination vertex as key, which has no adjacent vertex
elif source_vertex in self.adj_list:
                self.adj_list[source_vertex].append(destination_vertex)
                self.adj_list[destination_vertex] = []
# if only destination vertex is present in adjacency list, create a new
# vertex with source vertex as key and assign a list containing destination
# vertex as first adjacent vertex
elif destination_vertex in self.adj_list:
                self.adj_list[source_vertex] = [destination_vertex]
# if both source vertex and destination vertex are not present in adjacency
# list, create a new vertex with source vertex as key and a list containing
# destination vertex as it's first adjacent vertex. Then create a new vertex
# with destination vertex as key, which has no adjacent vertex
else:
                self.adj_list[source_vertex] = [destination_vertex]
                self.adj_list[destination_vertex] = []
return self
def __repr__( self ) -> str:
'''simple docstring'''
return pformat(self.adj_list )
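# ---------------------------------------------------------------------------
# Illustrative usage (editorial addition): add_edge returns self, so calls
# chain, and in undirected mode every edge is mirrored in both adjacency lists.
if __name__ == "__main__":
    graph = GraphAdjacencyList[int](directed=False)
    graph.add_edge(1, 2).add_edge(2, 3)
    print(graph)  # {1: [2], 2: [1, 3], 3: [2]}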
| 180 | 0 |
import unittest
from accelerate import debug_launcher
from accelerate.test_utils import require_cpu, test_ops, test_script
@require_cpu
class MultiCPUTester(unittest.TestCase):
'''simple docstring'''
    def test_script(self):
debug_launcher(test_script.main )
    def test_ops(self):
debug_launcher(test_ops.main )
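# ---------------------------------------------------------------------------
# Illustrative sketch (editorial addition): debug_launcher spawns the given
# function under a CPU multi-process launcher, which is how the tests above
# exercise distributed code paths without GPUs; num_processes is part of the
# public accelerate API.
def run_test_script_with_two_processes():
    debug_launcher(test_script.main, num_processes=2)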
| 15 |
'''simple docstring'''
import inspect
import tempfile
import unittest
from huggingface_hub import hf_hub_download
from transformers import is_torch_available
from transformers.testing_utils import is_flaky, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
__SCREAMING_SNAKE_CASE :Dict = 1e-4
if is_torch_available():
import torch
from transformers import AutoformerConfig, AutoformerForPrediction, AutoformerModel
from transformers.models.autoformer.modeling_autoformer import AutoformerDecoder, AutoformerEncoder
@require_torch
class AutoformerModelTester:
    def __init__(
        self,
        parent,
        d_model=16,
        batch_size=13,
        prediction_length=7,
        context_length=14,
        label_length=10,
        cardinality=19,
        embedding_dimension=5,
        num_time_features=4,
        is_training=True,
        hidden_size=16,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=4,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        lags_sequence=[1, 2, 3, 4, 5],
        moving_average=25,
        autocorrelation_factor=5,
    ):
        self.d_model = d_model
        self.parent = parent
        self.batch_size = batch_size
        self.prediction_length = prediction_length
        self.context_length = context_length
        self.cardinality = cardinality
        self.num_time_features = num_time_features
        self.lags_sequence = lags_sequence
        self.embedding_dimension = embedding_dimension
        self.is_training = is_training
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob

        self.encoder_seq_length = context_length
        self.decoder_seq_length = prediction_length + label_length
        self.label_length = label_length

        self.moving_average = moving_average
        self.autocorrelation_factor = autocorrelation_factor
    def get_config(self):
return AutoformerConfig(
d_model=self.d_model , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , prediction_length=self.prediction_length , context_length=self.context_length , label_length=self.label_length , lags_sequence=self.lags_sequence , num_time_features=self.num_time_features , num_static_categorical_features=1 , cardinality=[self.cardinality] , embedding_dimension=[self.embedding_dimension] , moving_average=self.moving_average , )
    def prepare_autoformer_inputs_dict(self, config):
        _past_length = config.context_length + max(config.lags_sequence)

        static_categorical_features = ids_tensor([self.batch_size, 1], config.cardinality[0])
        past_time_features = floats_tensor([self.batch_size, _past_length, config.num_time_features])
        past_values = floats_tensor([self.batch_size, _past_length])
        past_observed_mask = floats_tensor([self.batch_size, _past_length]) > 0.5

        # decoder inputs
        future_time_features = floats_tensor([self.batch_size, config.prediction_length, config.num_time_features])
        future_values = floats_tensor([self.batch_size, config.prediction_length])

        inputs_dict = {
            "past_values": past_values,
            "static_categorical_features": static_categorical_features,
            "past_time_features": past_time_features,
            "past_observed_mask": past_observed_mask,
            "future_time_features": future_time_features,
            "future_values": future_values,
        }
        return inputs_dict
    def prepare_config_and_inputs(self):
        config = self.get_config()
        inputs_dict = self.prepare_autoformer_inputs_dict(config)
        return config, inputs_dict
    def prepare_config_and_inputs_for_common(self):
        config, inputs_dict = self.prepare_config_and_inputs()
        return config, inputs_dict
def lowercase ( self : Optional[Any] , snake_case_ : int , snake_case_ : Optional[int] ):
_UpperCAmelCase = AutoformerModel(config=snake_case_ ).to(snake_case_ ).eval()
_UpperCAmelCase = model(**snake_case_ )
_UpperCAmelCase = outputs.encoder_last_hidden_state
_UpperCAmelCase = outputs.last_hidden_state
with tempfile.TemporaryDirectory() as tmpdirname:
_UpperCAmelCase = model.get_encoder()
encoder.save_pretrained(snake_case_ )
_UpperCAmelCase = AutoformerEncoder.from_pretrained(snake_case_ ).to(snake_case_ )
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = model.create_network_inputs(**snake_case_ )
_UpperCAmelCase , _UpperCAmelCase = model.decomposition_layer(transformer_inputs[:, : config.context_length, ...] )
_UpperCAmelCase = torch.cat(
(transformer_inputs[:, : config.context_length, ...], feature[:, : config.context_length, ...]) , dim=-1 , )
_UpperCAmelCase = encoder(inputs_embeds=snake_case_ )[0]
self.parent.assertTrue((encoder_last_hidden_state_a - encoder_last_hidden_state).abs().max().item() < 1e-3 )
_UpperCAmelCase = (
torch.mean(transformer_inputs[:, : config.context_length, ...] , dim=1 )
.unsqueeze(1 )
.repeat(1 , config.prediction_length , 1 )
)
_UpperCAmelCase = torch.zeros(
[transformer_inputs.shape[0], config.prediction_length, transformer_inputs.shape[2]] , device=enc_input.device , )
_UpperCAmelCase = torch.cat(
(
torch.cat((seasonal_input[:, -config.label_length :, ...], zeros) , dim=1 ),
feature[:, config.context_length - config.label_length :, ...],
) , dim=-1 , )
_UpperCAmelCase = torch.cat(
(
torch.cat((trend_input[:, -config.label_length :, ...], mean) , dim=1 ),
feature[:, config.context_length - config.label_length :, ...],
) , dim=-1 , )
with tempfile.TemporaryDirectory() as tmpdirname:
_UpperCAmelCase = model.get_decoder()
decoder.save_pretrained(snake_case_ )
_UpperCAmelCase = AutoformerDecoder.from_pretrained(snake_case_ ).to(snake_case_ )
_UpperCAmelCase = decoder(
trend=snake_case_ , inputs_embeds=snake_case_ , encoder_hidden_states=snake_case_ , )[0]
self.parent.assertTrue((last_hidden_state_a - last_hidden_state).abs().max().item() < 1e-3 )
@require_torch
class AutoformerModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (AutoformerModel, AutoformerForPrediction) if is_torch_available() else ()
    all_generative_model_classes = (AutoformerForPrediction,) if is_torch_available() else ()
    pipeline_model_mapping = {"feature-extraction": AutoformerModel} if is_torch_available() else {}
    test_pruning = False
    test_head_masking = False
    test_missing_keys = False
    test_torchscript = False
    test_inputs_embeds = False
    test_model_common_attributes = False
    def setUp(self):
        self.model_tester = AutoformerModelTester(self)
        self.config_tester = ConfigTester(self, config_class=AutoformerConfig, has_text_modality=False)
    def test_config(self):
        self.config_tester.run_common_tests()
def lowercase ( self : Union[str, Any] ):
_UpperCAmelCase , _UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
for model_class in self.all_model_classes:
_UpperCAmelCase = model_class(snake_case_ )
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(snake_case_ )
_UpperCAmelCase , _UpperCAmelCase = model_class.from_pretrained(snake_case_ , output_loading_info=snake_case_ )
self.assertEqual(info["missing_keys"] , [] )
def lowercase ( self : Optional[int] ):
_UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.check_encoder_decoder_model_standalone(*snake_case_ )
@unittest.skip(reason="Model has no tokens embeddings" )
def lowercase ( self : Optional[int] ):
pass
def lowercase ( self : Optional[int] ):
_UpperCAmelCase = inspect.signature(getattr(snake_case_ , "forward" ) )
# The main input is the name of the argument after `self`
_UpperCAmelCase = list(model_signature.parameters.keys() )[1]
self.assertEqual(AutoformerModel.main_input_name , snake_case_ )
def lowercase ( self : List[str] ):
_UpperCAmelCase , _UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_UpperCAmelCase = model_class(snake_case_ )
_UpperCAmelCase = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_UpperCAmelCase = [*signature.parameters.keys()]
_UpperCAmelCase = [
"past_values",
"past_time_features",
"past_observed_mask",
"static_categorical_features",
"static_real_features",
"future_values",
"future_time_features",
]
if model.__class__.__name__ in ["AutoformerForPrediction"]:
expected_arg_names.append("future_observed_mask" )
expected_arg_names.extend(
[
"decoder_attention_mask",
"head_mask",
"decoder_head_mask",
"cross_attn_head_mask",
"encoder_outputs",
"past_key_values",
"output_hidden_states",
"output_attentions",
"use_cache",
"return_dict",
] )
self.assertListEqual(arg_names[: len(snake_case_ )] , snake_case_ )
def lowercase ( self : Optional[int] ):
_UpperCAmelCase , _UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
_UpperCAmelCase = True
_UpperCAmelCase = getattr(self.model_tester , "seq_length" , snake_case_ )
_UpperCAmelCase = getattr(self.model_tester , "decoder_seq_length" , snake_case_ )
_UpperCAmelCase = getattr(self.model_tester , "encoder_seq_length" , snake_case_ )
_UpperCAmelCase = getattr(self.model_tester , "d_model" , snake_case_ )
_UpperCAmelCase = getattr(self.model_tester , "num_attention_heads" , snake_case_ )
_UpperCAmelCase = d_model // num_attention_heads
for model_class in self.all_model_classes:
_UpperCAmelCase = True
_UpperCAmelCase = False
_UpperCAmelCase = True
_UpperCAmelCase = model_class(snake_case_ )
model.to(snake_case_ )
model.eval()
with torch.no_grad():
_UpperCAmelCase = model(**self._prepare_for_class(snake_case_ , snake_case_ ) )
_UpperCAmelCase = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions
self.assertEqual(len(snake_case_ ) , self.model_tester.num_hidden_layers )
# check that output_attentions also work using config
del inputs_dict["output_attentions"]
_UpperCAmelCase = True
_UpperCAmelCase = model_class(snake_case_ )
model.to(snake_case_ )
model.eval()
with torch.no_grad():
_UpperCAmelCase = model(**self._prepare_for_class(snake_case_ , snake_case_ ) )
_UpperCAmelCase = outputs.encoder_attentions
self.assertEqual(len(snake_case_ ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, encoder_seq_length, dim] , )
_UpperCAmelCase = len(snake_case_ )
_UpperCAmelCase = 7
if "last_hidden_state" in outputs:
correct_outlen += 1
if "trend" in outputs:
correct_outlen += 1
if "past_key_values" in outputs:
correct_outlen += 1 # past_key_values have been returned
if "loss" in outputs:
correct_outlen += 1
if "params" in outputs:
correct_outlen += 1
self.assertEqual(snake_case_ , snake_case_ )
# decoder attentions
_UpperCAmelCase = outputs.decoder_attentions
self.assertIsInstance(snake_case_ , (list, tuple) )
self.assertEqual(len(snake_case_ ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(decoder_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, decoder_seq_length, dim] , )
# cross attentions
_UpperCAmelCase = outputs.cross_attentions
self.assertIsInstance(snake_case_ , (list, tuple) )
self.assertEqual(len(snake_case_ ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(cross_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, decoder_seq_length, dim] , )
# Check attention is always last and order is fine
_UpperCAmelCase = True
_UpperCAmelCase = True
_UpperCAmelCase = model_class(snake_case_ )
model.to(snake_case_ )
model.eval()
with torch.no_grad():
_UpperCAmelCase = model(**self._prepare_for_class(snake_case_ , snake_case_ ) )
self.assertEqual(out_len + 2 , len(snake_case_ ) )
_UpperCAmelCase = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions
self.assertEqual(len(snake_case_ ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(self_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, encoder_seq_length, dim] , )
@is_flaky()
def lowercase ( self : Dict ):
super().test_retain_grad_hidden_states_attentions()
def prepare_batch(filename="train-batch.pt"):
    file = hf_hub_download(repo_id="hf-internal-testing/tourism-monthly-batch", filename=filename, repo_type="dataset")
    batch = torch.load(file, map_location=torch_device)
    return batch
@require_torch
@slow
class AutoformerModelIntegrationTests(unittest.TestCase):
def lowercase ( self : Optional[int] ):
_UpperCAmelCase = AutoformerModel.from_pretrained("huggingface/autoformer-tourism-monthly" ).to(snake_case_ )
_UpperCAmelCase = prepare_batch()
with torch.no_grad():
_UpperCAmelCase = model(
past_values=batch["past_values"] , past_time_features=batch["past_time_features"] , past_observed_mask=batch["past_observed_mask"] , static_categorical_features=batch["static_categorical_features"] , future_values=batch["future_values"] , future_time_features=batch["future_time_features"] , )[0]
_UpperCAmelCase = torch.Size(
(6_4, model.config.prediction_length + model.config.label_length, model.config.feature_size) )
self.assertEqual(output.shape , snake_case_ )
_UpperCAmelCase = torch.tensor(
[[0.3_5_9_3, -1.3_3_9_8, 0.6_3_3_0], [0.2_2_7_9, 1.5_3_9_6, -0.1_7_9_2], [0.0_4_5_0, 1.3_2_2_5, -0.2_3_3_5]] , device=snake_case_ )
self.assertTrue(torch.allclose(output[0, :3, :3] , snake_case_ , atol=snake_case_ ) )
def lowercase ( self : Optional[Any] ):
_UpperCAmelCase = AutoformerForPrediction.from_pretrained("huggingface/autoformer-tourism-monthly" ).to(snake_case_ )
_UpperCAmelCase = prepare_batch("val-batch.pt" )
with torch.no_grad():
_UpperCAmelCase = model(
past_values=batch["past_values"] , past_time_features=batch["past_time_features"] , past_observed_mask=batch["past_observed_mask"] , static_categorical_features=batch["static_categorical_features"] , ).encoder_last_hidden_state
_UpperCAmelCase = torch.Size((6_4, model.config.context_length, model.config.d_model) )
self.assertEqual(output.shape , snake_case_ )
_UpperCAmelCase = torch.tensor(
[[-0.0_7_3_4, -0.9_0_3_6, 0.8_3_5_8], [4.7_1_8_6, 2.4_1_1_3, 1.9_5_8_1], [1.7_9_5_3, 2.3_5_5_8, 1.2_9_7_0]] , device=snake_case_ )
self.assertTrue(torch.allclose(output[0, :3, :3] , snake_case_ , atol=snake_case_ ) )
def lowercase ( self : Tuple ):
_UpperCAmelCase = AutoformerForPrediction.from_pretrained("huggingface/autoformer-tourism-monthly" ).to(snake_case_ )
_UpperCAmelCase = prepare_batch("val-batch.pt" )
with torch.no_grad():
_UpperCAmelCase = model.generate(
static_categorical_features=batch["static_categorical_features"] , past_time_features=batch["past_time_features"] , past_values=batch["past_values"] , future_time_features=batch["future_time_features"] , past_observed_mask=batch["past_observed_mask"] , )
_UpperCAmelCase = torch.Size((6_4, model.config.num_parallel_samples, model.config.prediction_length) )
self.assertEqual(outputs.sequences.shape , snake_case_ )
_UpperCAmelCase = torch.tensor([3_1_3_0.6_7_6_3, 4_0_5_6.5_2_9_3, 7_0_5_3.0_7_8_6] , device=snake_case_ )
_UpperCAmelCase = outputs.sequences.mean(dim=1 )
self.assertTrue(torch.allclose(mean_prediction[0, -3:] , snake_case_ , rtol=1e-1 ) )
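# ---------------------------------------------------------------------------
# Illustrative usage sketch (editorial addition), condensed from the last
# integration test above: generate() returns num_parallel_samples trajectories
# per series, and averaging over the sample dimension yields a point forecast.
# Assumes network access to the checkpoint and the hub-hosted test batch.
def autoformer_forecast_example():
    model = AutoformerForPrediction.from_pretrained("huggingface/autoformer-tourism-monthly").to(torch_device)
    batch = prepare_batch("val-batch.pt")
    with torch.no_grad():
        outputs = model.generate(
            static_categorical_features=batch["static_categorical_features"],
            past_time_features=batch["past_time_features"],
            past_values=batch["past_values"],
            future_time_features=batch["future_time_features"],
            past_observed_mask=batch["past_observed_mask"],
        )
    return outputs.sequences.mean(dim=1)  # shape (batch_size, prediction_length)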
| 22 | 0 |
import io
import json
import unittest
from parameterized import parameterized
from transformers import FSMTForConditionalGeneration, FSMTTokenizer
from transformers.testing_utils import get_tests_dir, require_torch, slow, torch_device
from utils import calculate_bleu
filename = get_tests_dir() + "/test_data/fsmt/fsmt_val_data.json"
with io.open(filename, "r", encoding="utf-8") as f:
    bleu_data = json.load(f)
@require_torch
class ModelEvalTester(unittest.TestCase):
    def get_tokenizer(self, mname):
        return FSMTTokenizer.from_pretrained(mname)

    def get_model(self, mname):
        model = FSMTForConditionalGeneration.from_pretrained(mname).to(torch_device)
        if torch_device == "cuda":
            model.half()
        return model

    @parameterized.expand(
        [
            ["en-ru", 26.0],
            ["ru-en", 22.0],
            ["en-de", 22.0],
            ["de-en", 29.0],
        ]
    )
    @slow
    def test_bleu_scores(self, pair, min_bleu_score):
        mname = f"facebook/wmt19-{pair}"
        tokenizer = self.get_tokenizer(mname)
        model = self.get_model(mname)

        src_sentences = bleu_data[pair]["src"]
        tgt_sentences = bleu_data[pair]["tgt"]

        batch = tokenizer(src_sentences, return_tensors="pt", truncation=True, padding="longest").to(torch_device)
        outputs = model.generate(
            input_ids=batch.input_ids,
            num_beams=8,
        )
        decoded_sentences = tokenizer.batch_decode(
            outputs, skip_special_tokens=True, clean_up_tokenization_spaces=False
        )
        scores = calculate_bleu(decoded_sentences, tgt_sentences)
        print(scores)
        self.assertGreaterEqual(scores["bleu"], min_bleu_score)
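# ---------------------------------------------------------------------------
# Illustrative usage sketch (editorial addition): a single translation with
# one of the checkpoints the BLEU test above evaluates; assumes network access
# to "facebook/wmt19-en-de".
def translate_example():
    mname = "facebook/wmt19-en-de"
    tokenizer = FSMTTokenizer.from_pretrained(mname)
    model = FSMTForConditionalGeneration.from_pretrained(mname)
    batch = tokenizer(["Machine learning is great, isn't it?"], return_tensors="pt")
    generated = model.generate(input_ids=batch.input_ids, num_beams=8)
    return tokenizer.batch_decode(generated, skip_special_tokens=True)[0]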
| 363 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
snake_case : Optional[Any] = logging.get_logger(__name__)
snake_case : Union[str, Any] = {
"transfo-xl-wt103": "https://huggingface.co/transfo-xl-wt103/resolve/main/config.json",
}
class TransfoXLConfig(PretrainedConfig):
    model_type = "transfo-xl"
    keys_to_ignore_at_inference = ["mems"]
    attribute_map = {
        "n_token": "vocab_size",
        "hidden_size": "d_model",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }
    def __init__(
        self,
        vocab_size=267_735,
        cutoffs=[20_000, 40_000, 200_000],
        d_model=1_024,
        d_embed=1_024,
        n_head=16,
        d_head=64,
        d_inner=4_096,
        div_val=4,
        pre_lnorm=False,
        n_layer=18,
        mem_len=1_600,
        clamp_len=1_000,
        same_length=True,
        proj_share_all_but_first=True,
        attn_type=0,
        sample_softmax=-1,
        adaptive=True,
        dropout=0.1,
        dropatt=0.0,
        untie_r=True,
        init="normal",
        init_range=0.01,
        proj_init_std=0.01,
        init_std=0.02,
        layer_norm_epsilon=1e-5,
        eos_token_id=0,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.cutoffs = []
        self.cutoffs.extend(cutoffs)
        if proj_share_all_but_first:
            self.tie_projs = [False] + [True] * len(self.cutoffs)
        else:
            self.tie_projs = [False] + [False] * len(self.cutoffs)
        self.d_model = d_model
        self.d_embed = d_embed
        self.d_head = d_head
        self.d_inner = d_inner
        self.div_val = div_val
        self.pre_lnorm = pre_lnorm
        self.n_layer = n_layer
        self.n_head = n_head
        self.mem_len = mem_len
        self.same_length = same_length
        self.attn_type = attn_type
        self.clamp_len = clamp_len
        self.sample_softmax = sample_softmax
        self.adaptive = adaptive
        self.dropout = dropout
        self.dropatt = dropatt
        self.untie_r = untie_r
        self.init = init
        self.init_range = init_range
        self.proj_init_std = proj_init_std
        self.init_std = init_std
        self.layer_norm_epsilon = layer_norm_epsilon
        super().__init__(eos_token_id=eos_token_id, **kwargs)
@property
    def max_position_embeddings(self):
# Message copied from Transformer-XL documentation
logger.info(f'''The model {self.model_type} is one of the few models that has no sequence length limit.''' )
return -1
@max_position_embeddings.setter
    def max_position_embeddings(self, _a):
# Message copied from Transformer-XL documentation
raise NotImplementedError(
f'''The model {self.model_type} is one of the few models that has no sequence length limit.''' )
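# ---------------------------------------------------------------------------
# Illustrative usage sketch (editorial addition): attribute_map lets generic
# names resolve to the Transformer-XL specific ones, and the property above
# advertises the model's unlimited context length with -1.
_cfg = TransfoXLConfig()
assert _cfg.hidden_size == _cfg.d_model  # routed through attribute_map
assert _cfg.max_position_embeddings == -1  # no fixed sequence-length limit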
| 41 | 0 |
"""simple docstring"""
import tempfile
import unittest
from pathlib import Path
from shutil import copyfile
from transformers import MaMaaaTokenizer, is_torch_available
from transformers.testing_utils import (
get_tests_dir,
nested_simplify,
require_sentencepiece,
require_tokenizers,
require_torch,
slow,
)
from transformers.utils import is_sentencepiece_available
if is_sentencepiece_available():
from transformers.models.mam_aaa.tokenization_mam_aaa import VOCAB_FILES_NAMES, save_json
from ...test_tokenization_common import TokenizerTesterMixin
if is_sentencepiece_available():
    SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece.model")
if is_torch_available():
from transformers.models.mam_aaa.modeling_mam_aaa import shift_tokens_right
EN_CODE = 12_8022
FR_CODE = 12_8028
@require_sentencepiece
class M2M100TokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = MaMaaaTokenizer
    test_rust_tokenizer = False
    test_seq2seq = False
    test_sentencepiece = True
    def setUp(self):
        super().setUp()

        vocab = ["</s>", "<unk>", "▁This", "▁is", "▁a", "▁t", "est", "\u0120", "<pad>"]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        save_dir = Path(self.tmpdirname)
        save_json(vocab_tokens, save_dir / VOCAB_FILES_NAMES["vocab_file"])
        if not (save_dir / VOCAB_FILES_NAMES["spm_file"]).exists():
            copyfile(SAMPLE_VOCAB, save_dir / VOCAB_FILES_NAMES["spm_file"])

        tokenizer = MaMaaaTokenizer.from_pretrained(self.tmpdirname)
        tokenizer.save_pretrained(self.tmpdirname)
def a_ ( self, **lowerCAmelCase__) -> Union[str, Any]:
return MaMaaaTokenizer.from_pretrained(self.tmpdirname, **lowerCAmelCase__)
def a_ ( self, lowerCAmelCase__) -> Optional[Any]:
return (
"This is a test",
"This is a test",
)
def a_ ( self) -> Optional[int]:
snake_case_ = '</s>'
snake_case_ = 0
self.assertEqual(self.get_tokenizer()._convert_token_to_id(lowerCAmelCase__), lowerCAmelCase__)
self.assertEqual(self.get_tokenizer()._convert_id_to_token(lowerCAmelCase__), lowerCAmelCase__)
def a_ ( self) -> Union[str, Any]:
snake_case_ = self.get_tokenizer()
snake_case_ = list(tokenizer.get_vocab().keys())
self.assertEqual(vocab_keys[0], '</s>')
self.assertEqual(vocab_keys[1], '<unk>')
self.assertEqual(vocab_keys[-1], '<s>')
self.assertEqual(len(lowerCAmelCase__), tokenizer.vocab_size + len(tokenizer.get_added_vocab()))
@unittest.skip('Skip this test while all models are still to be uploaded.')
def a_ ( self) -> Tuple:
pass
def a_ ( self) -> Tuple:
snake_case_ = self.get_tokenizer()
snake_case_ = tokenizer.tokenize('This is a test')
self.assertListEqual(lowerCAmelCase__, ['▁This', '▁is', '▁a', '▁t', 'est'])
self.assertListEqual(
tokenizer.convert_tokens_to_ids(lowerCAmelCase__), [2, 3, 4, 5, 6], )
snake_case_ = tokenizer.convert_ids_to_tokens([2, 3, 4, 5, 6])
self.assertListEqual(lowerCAmelCase__, ['▁This', '▁is', '▁a', '▁t', 'est'])
snake_case_ = tokenizer.convert_tokens_to_string(lowerCAmelCase__)
self.assertEqual(lowerCAmelCase__, 'This is a test')
@slow
def a_ ( self) -> Tuple:
# fmt: off
snake_case_ = {'input_ids': [[12_8022, 11_0108, 397, 11, 3_8272, 2247, 12_4811, 285, 1_8105, 1586, 207, 7, 3_9534, 4428, 397, 1019, 1_8105, 1586, 207, 7, 4_1337, 1_6786, 241, 7, 2_0214, 17, 12_5690, 1_0398, 7, 4_4378, 5_8069, 6_8342, 7798, 7343, 11, 299, 3_3310, 4, 158, 3_7350, 9_4077, 4569, 299, 3_3310, 90, 4, 5_2840, 290, 4, 3_1270, 112, 299, 682, 4, 5_2840, 3_9953, 1_4079, 193, 5_2519, 9_0894, 1_7894, 12_0697, 11, 4_0445, 551, 17, 1019, 5_2519, 9_0894, 1_7756, 963, 11, 4_0445, 480, 17, 9792, 1120, 5173, 1393, 6240, 1_6786, 241, 12_0996, 28, 1245, 1393, 11_8240, 1_1123, 1019, 9_3612, 2691, 1_0618, 9_8058, 12_0409, 1928, 279, 4, 4_0683, 367, 178, 207, 1019, 103, 10_3121, 506, 6_5296, 5, 2], [12_8022, 2_1217, 367, 117, 12_5450, 128, 719, 7, 7308, 40, 9_3612, 1_2669, 1116, 1_6704, 71, 1_7785, 3699, 1_5592, 35, 144, 9584, 241, 1_1943, 713, 950, 799, 2247, 8_8427, 150, 149, 11_8813, 12_0706, 1019, 10_6906, 8_1518, 28, 1224, 2_2799, 397, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [12_8022, 1658, 12_3311, 5155, 5578, 4722, 279, 1_4947, 2366, 1120, 1197, 14, 1348, 9232, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
        # fmt: on
        expected_encoding = snake_case_  # alias the long literal above, kept verbatim under its original name
        self.tokenizer_integration_test_util(
            expected_encoding=expected_encoding, model_name='facebook/m2m100_418M', revision='c168bae485c864188cf9aa0e4108b0b6934dc91e')
@require_torch
@require_sentencepiece
@require_tokenizers
class M2M100TokenizerIntegrationTest(unittest.TestCase):
    checkpoint_name = 'facebook/m2m100_418M'
    src_text = [
        'In my opinion, there are two levels of response from the French government.',
        'NSA Affair Emphasizes Complete Lack of Debate on Intelligence',
    ]
    tgt_text = [
        'Selon moi, il y a deux niveaux de réponse de la part du gouvernement français.',
        "L'affaire NSA souligne l'absence totale de débat sur le renseignement",
    ]
    # fmt: off
    expected_src_tokens = [EN_CODE, 593, 1949, 115781, 4, 71586, 4234, 60633, 126233, 432, 123808, 15592, 1197, 117132, 120618, 5, 2]
    # fmt: on

    @classmethod
    def setUpClass(cls):
        cls.tokenizer = M2M100Tokenizer.from_pretrained(cls.checkpoint_name, src_lang='en', tgt_lang='fr')
        cls.pad_token_id = 1
        return cls
    def test_language_codes(self):
        self.assertEqual(self.tokenizer.get_lang_id('ar'), 128006)
        self.assertEqual(self.tokenizer.get_lang_id('en'), 128022)
        self.assertEqual(self.tokenizer.get_lang_id('ro'), 128076)
        self.assertEqual(self.tokenizer.get_lang_id('mr'), 128063)
    def test_get_vocab(self):
        vocab = self.tokenizer.get_vocab()
        self.assertEqual(len(vocab), self.tokenizer.vocab_size)
        self.assertEqual(vocab['<unk>'], 3)
        self.assertIn(self.tokenizer.get_lang_token('en'), vocab)
    def test_tokenizer_batch_encode_plus(self):
        self.tokenizer.src_lang = 'en'
        input_ids = self.tokenizer.batch_encode_plus(self.src_text).input_ids[0]
        self.assertListEqual(self.expected_src_tokens, input_ids)
    def test_tokenizer_decode_ignores_language_codes(self):
        self.assertIn(FR_CODE, self.tokenizer.all_special_ids)
        # fmt: off
        generated_ids = [FR_CODE, 5364, 82, 8642, 4, 294, 47, 8, 14028, 136, 3286, 9706, 6, 90797, 6, 144012, 162, 88128, 30061, 5, 2]
        # fmt: on
        result = self.tokenizer.decode(generated_ids, skip_special_tokens=True)
        expected_french = self.tokenizer.decode(generated_ids[1:], skip_special_tokens=True)
        self.assertEqual(result, expected_french)
        self.assertNotIn(self.tokenizer.eos_token, result)
    def test_special_tokens_unaffected_by_save_load(self):
        tmpdirname = tempfile.mkdtemp()
        original_special_tokens = self.tokenizer.lang_token_to_id
        self.tokenizer.save_pretrained(tmpdirname)
        new_tok = M2M100Tokenizer.from_pretrained(tmpdirname)
        self.assertDictEqual(new_tok.lang_token_to_id, original_special_tokens)
    @require_torch
    def test_batch_fairseq_parity(self):
        self.tokenizer.src_lang = 'en'
        self.tokenizer.tgt_lang = 'fr'
        batch = self.tokenizer(self.src_text, text_target=self.tgt_text, padding=True, return_tensors='pt')
        batch['decoder_input_ids'] = shift_tokens_right(
            batch['labels'], self.tokenizer.pad_token_id, self.tokenizer.eos_token_id)
        for k in batch:
            batch[k] = batch[k].tolist()
        # batch = {k: v.tolist() for k,v in batch.items()}
        # fairseq batch: https://gist.github.com/sshleifer/cba08bc2109361a74ac3760a7e30e4f4
        # batch.decoder_inputs_ids[0][0] ==
        assert batch.input_ids[1][0] == EN_CODE
        assert batch.input_ids[1][-1] == 2
        assert batch.labels[1][0] == FR_CODE
        assert batch.labels[1][-1] == 2
        assert batch.decoder_input_ids[1][:2] == [2, FR_CODE]
    @require_torch
    def test_src_lang_setter(self):
        self.tokenizer.src_lang = 'mr'
        self.assertListEqual(self.tokenizer.prefix_tokens, [self.tokenizer.get_lang_id('mr')])
        self.assertListEqual(self.tokenizer.suffix_tokens, [self.tokenizer.eos_token_id])
        self.tokenizer.src_lang = 'zh'
        self.assertListEqual(self.tokenizer.prefix_tokens, [self.tokenizer.get_lang_id('zh')])
        self.assertListEqual(self.tokenizer.suffix_tokens, [self.tokenizer.eos_token_id])
    @require_torch
    def test_tokenizer_target_mode(self):
        self.tokenizer.tgt_lang = 'mr'
        self.tokenizer._switch_to_target_mode()
        self.assertListEqual(self.tokenizer.prefix_tokens, [self.tokenizer.get_lang_id('mr')])
        self.assertListEqual(self.tokenizer.suffix_tokens, [self.tokenizer.eos_token_id])
        self.tokenizer._switch_to_input_mode()
        self.assertListEqual(self.tokenizer.prefix_tokens, [self.tokenizer.get_lang_id(self.tokenizer.src_lang)])
        self.tokenizer.tgt_lang = 'zh'
        self.tokenizer._switch_to_target_mode()
        self.assertListEqual(self.tokenizer.prefix_tokens, [self.tokenizer.get_lang_id('zh')])
        self.assertListEqual(self.tokenizer.suffix_tokens, [self.tokenizer.eos_token_id])
        self.tokenizer._switch_to_input_mode()
        self.assertListEqual(self.tokenizer.prefix_tokens, [self.tokenizer.get_lang_id(self.tokenizer.src_lang)])
    @require_torch
    def test_tokenizer_translation(self):
        inputs = self.tokenizer._build_translation_inputs('A test', return_tensors='pt', src_lang='en', tgt_lang='ar')
        self.assertEqual(
            nested_simplify(inputs),
            {
                # en_XX, A, test, EOS
                'input_ids': [[128022, 58, 4183, 2]],
                'attention_mask': [[1, 1, 1, 1]],
                # ar_AR
                'forced_bos_token_id': 128006,
            },
        )
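
# --- Hedged usage sketch (added for illustration; not part of the original test suite). It shows
# how the APIs exercised above fit together at inference time: the source-language code is
# prepended to the input ids, and generation would force the target-language code as BOS.
# Assumes `M2M100Tokenizer` is importable as in setUpClass and the checkpoint can be downloaded.
if __name__ == '__main__':
    tok = M2M100Tokenizer.from_pretrained('facebook/m2m100_418M', src_lang='en', tgt_lang='fr')
    enc = tok('A test', return_tensors='pt')
    # First id is the source language code, mirroring test_tokenizer_batch_encode_plus above.
    assert enc.input_ids[0][0].item() == tok.get_lang_id('en')
    # model.generate(**enc, forced_bos_token_id=tok.get_lang_id('fr')) would steer decoding to French.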
| 69 | """simple docstring"""
import torch
import torch.nn as nn
from transformers import CLIPConfig, CLIPVisionModel, PreTrainedModel
from ...utils import logging
logger = logging.get_logger(__name__)
def cosine_distance(image_embeds, text_embeds) -> torch.Tensor:
    normalized_image_embeds = nn.functional.normalize(image_embeds)
    normalized_text_embeds = nn.functional.normalize(text_embeds)
    return torch.mm(normalized_image_embeds, normalized_text_embeds.t())
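
# Shape note (added): because both inputs are L2-normalized before the matmul, the result is a
# (num_images, num_concepts) cosine-similarity matrix, e.g.
# cosine_distance(torch.randn(2, 512), torch.randn(17, 512)).shape == torch.Size([2, 17]).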
class StableDiffusionSafetyChecker(PreTrainedModel):
    config_class = CLIPConfig
    _no_split_modules = ['CLIPEncoderLayer']

    def __init__(self, config):
        super().__init__(config)
        self.vision_model = CLIPVisionModel(config.vision_config)
        self.visual_projection = nn.Linear(config.vision_config.hidden_size, config.projection_dim, bias=False)
        self.concept_embeds = nn.Parameter(torch.ones(17, config.projection_dim), requires_grad=False)
        self.special_care_embeds = nn.Parameter(torch.ones(3, config.projection_dim), requires_grad=False)
        self.concept_embeds_weights = nn.Parameter(torch.ones(17), requires_grad=False)
        self.special_care_embeds_weights = nn.Parameter(torch.ones(3), requires_grad=False)
    @torch.no_grad()
    def forward(self, clip_input, images):
        pooled_output = self.vision_model(clip_input)[1]  # pooled_output
        image_embeds = self.visual_projection(pooled_output)
        # we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
        special_cos_dist = cosine_distance(image_embeds, self.special_care_embeds).cpu().float().numpy()
        cos_dist = cosine_distance(image_embeds, self.concept_embeds).cpu().float().numpy()
        result = []
        batch_size = image_embeds.shape[0]
        for i in range(batch_size):
            result_img = {'special_scores': {}, 'special_care': [], 'concept_scores': {}, 'bad_concepts': []}
            # increase this value to create a stronger `nsfw` filter
            # at the cost of increasing the possibility of filtering benign images
            adjustment = 0.0
            for concept_idx in range(len(special_cos_dist[0])):
                concept_cos = special_cos_dist[i][concept_idx]
                concept_threshold = self.special_care_embeds_weights[concept_idx].item()
                result_img['special_scores'][concept_idx] = round(concept_cos - concept_threshold + adjustment, 3)
                if result_img['special_scores'][concept_idx] > 0:
                    result_img['special_care'].append({concept_idx, result_img['special_scores'][concept_idx]})
                    adjustment = 0.01
            for concept_idx in range(len(cos_dist[0])):
                concept_cos = cos_dist[i][concept_idx]
                concept_threshold = self.concept_embeds_weights[concept_idx].item()
                result_img['concept_scores'][concept_idx] = round(concept_cos - concept_threshold + adjustment, 3)
                if result_img['concept_scores'][concept_idx] > 0:
                    result_img['bad_concepts'].append(concept_idx)
            result.append(result_img)
        has_nsfw_concepts = [len(res['bad_concepts']) > 0 for res in result]
        return images, has_nsfw_concepts
    @torch.no_grad()
    def forward_onnx(self, clip_input, images):
        pooled_output = self.vision_model(clip_input)[1]  # pooled_output
        image_embeds = self.visual_projection(pooled_output)
        special_cos_dist = cosine_distance(image_embeds, self.special_care_embeds)
        cos_dist = cosine_distance(image_embeds, self.concept_embeds)
        # increase this value to create a stronger `nsfw` filter
        # at the cost of increasing the possibility of filtering benign images
        adjustment = 0.0
        special_scores = special_cos_dist - self.special_care_embeds_weights + adjustment
        # special_scores = special_scores.round(decimals=3)
        special_care = torch.any(special_scores > 0, dim=1)
        special_adjustment = special_care * 0.01
        special_adjustment = special_adjustment.unsqueeze(1).expand(-1, cos_dist.shape[1])
        concept_scores = (cos_dist - self.concept_embeds_weights) + special_adjustment
        # concept_scores = concept_scores.round(decimals=3)
        has_nsfw_concepts = torch.any(concept_scores > 0, dim=1)
        return images, has_nsfw_concepts
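
# --- Hedged usage sketch (added for illustration): instantiating the checker from a fresh
# CLIPConfig with random weights to demonstrate the forward contract only. Real deployments
# load trained concept embeddings from a checkpoint; with the `torch.ones` initializers above
# the scores carry no meaning, so this is about shapes and the (images, has_nsfw_concepts) return.
if __name__ == '__main__':
    config = CLIPConfig()
    checker = StableDiffusionSafetyChecker(config)
    size = config.vision_config.image_size  # 224 for the default CLIP vision config
    clip_input = torch.randn(2, 3, size, size)
    images = torch.randn(2, 3, 64, 64)  # stand-in for decoded images
    images, has_nsfw_concepts = checker(clip_input, images)
    print(has_nsfw_concepts)  # one boolean per image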
| 69 | 1 |
'''simple docstring'''
from transformers import BertTokenizer, EncoderDecoderModel, Seq2SeqTrainer, Seq2SeqTrainingArguments
from transformers.testing_utils import TestCasePlus, require_torch, slow
from transformers.utils import is_datasets_available
if is_datasets_available():
import datasets
class Seq2seqTrainerTester(TestCasePlus):
    @slow
    @require_torch
    def test_finetune_bert2bert(self):
        bert2bert = EncoderDecoderModel.from_encoder_decoder_pretrained("prajjwal1/bert-tiny", "prajjwal1/bert-tiny")
        tokenizer = BertTokenizer.from_pretrained("bert-base-uncased")
        bert2bert.config.vocab_size = bert2bert.config.encoder.vocab_size
        bert2bert.config.eos_token_id = tokenizer.sep_token_id
        bert2bert.config.decoder_start_token_id = tokenizer.cls_token_id
        bert2bert.config.max_length = 128
        train_dataset = datasets.load_dataset("cnn_dailymail", "3.0.0", split="train[:1%]")
        val_dataset = datasets.load_dataset("cnn_dailymail", "3.0.0", split="validation[:1%]")
        train_dataset = train_dataset.select(range(32))
        val_dataset = val_dataset.select(range(16))
        batch_size = 4
        def _map_to_encoder_decoder_inputs(batch):
            # Tokenizer will automatically set [BOS] <text> [EOS]
            inputs = tokenizer(batch["article"], padding="max_length", truncation=True, max_length=512)
            outputs = tokenizer(batch["highlights"], padding="max_length", truncation=True, max_length=128)
            batch["input_ids"] = inputs.input_ids
            batch["attention_mask"] = inputs.attention_mask
            batch["decoder_input_ids"] = outputs.input_ids
            batch["labels"] = outputs.input_ids.copy()
            batch["labels"] = [
                [-100 if token == tokenizer.pad_token_id else token for token in labels] for labels in batch["labels"]
            ]
            batch["decoder_attention_mask"] = outputs.attention_mask
            assert all(len(x) == 512 for x in inputs.input_ids)
            assert all(len(x) == 128 for x in outputs.input_ids)
            return batch
        def _compute_metrics(pred):
            labels_ids = pred.label_ids
            pred_ids = pred.predictions
            # all unnecessary tokens are removed
            pred_str = tokenizer.batch_decode(pred_ids, skip_special_tokens=True)
            label_str = tokenizer.batch_decode(labels_ids, skip_special_tokens=True)
            accuracy = sum([int(pred_str[i] == label_str[i]) for i in range(len(pred_str))]) / len(pred_str)
            return {"accuracy": accuracy}
        # map train dataset
        train_dataset = train_dataset.map(
            _map_to_encoder_decoder_inputs, batched=True, batch_size=batch_size, remove_columns=["article", "highlights"],
        )
        train_dataset.set_format(
            type="torch", columns=["input_ids", "attention_mask", "decoder_input_ids", "decoder_attention_mask", "labels"],
        )
        # same for validation dataset
        val_dataset = val_dataset.map(
            _map_to_encoder_decoder_inputs, batched=True, batch_size=batch_size, remove_columns=["article", "highlights"],
        )
        val_dataset.set_format(
            type="torch", columns=["input_ids", "attention_mask", "decoder_input_ids", "decoder_attention_mask", "labels"],
        )
        output_dir = self.get_auto_remove_tmp_dir()
        training_args = Seq2SeqTrainingArguments(
            output_dir=output_dir, per_device_train_batch_size=batch_size, per_device_eval_batch_size=batch_size,
            predict_with_generate=True, evaluation_strategy="steps", do_train=True, do_eval=True,
            warmup_steps=0, eval_steps=2, logging_steps=2,
        )
        # instantiate trainer
        trainer = Seq2SeqTrainer(
            model=bert2bert, args=training_args, compute_metrics=_compute_metrics,
            train_dataset=train_dataset, eval_dataset=val_dataset, tokenizer=tokenizer,
        )
        # start training
        trainer.train()
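
# --- Hedged sketch (added for illustration): the -100 masking in _map_to_encoder_decoder_inputs
# follows the standard transformers labels convention: positions holding pad_token_id are set
# to -100 so the cross-entropy loss (ignore_index=-100) skips them. A pad id of 0 is assumed
# here purely for the example.
def mask_pad_labels(labels, pad_token_id=0):
    """Replace pad positions with -100, mirroring the list comprehension in the test above."""
    return [[-100 if token == pad_token_id else token for token in seq] for seq in labels]

assert mask_pad_labels([[5, 7, 0, 0]]) == [[5, 7, -100, -100]]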
| 354 |
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

YOLOS_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "hustvl/yolos-small": "https://huggingface.co/hustvl/yolos-small/resolve/main/config.json",
    # See all YOLOS models at https://huggingface.co/models?filter=yolos
}
class YolosConfig(PretrainedConfig):
    model_type = "yolos"

    def __init__(
        self,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        image_size=[512, 864],
        patch_size=16,
        num_channels=3,
        qkv_bias=True,
        num_detection_tokens=100,
        use_mid_position_embeddings=True,
        auxiliary_loss=False,
        class_cost=1,
        bbox_cost=5,
        giou_cost=2,
        bbox_loss_coefficient=5,
        giou_loss_coefficient=2,
        eos_coefficient=0.1,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias
        self.num_detection_tokens = num_detection_tokens
        self.use_mid_position_embeddings = use_mid_position_embeddings
        self.auxiliary_loss = auxiliary_loss
        # Hungarian matcher
        self.class_cost = class_cost
        self.bbox_cost = bbox_cost
        self.giou_cost = giou_cost
        # Loss coefficients
        self.bbox_loss_coefficient = bbox_loss_coefficient
        self.giou_loss_coefficient = giou_loss_coefficient
        self.eos_coefficient = eos_coefficient
class YolosOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-4

    @property
    def default_onnx_opset(self) -> int:
        return 12
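
# --- Hedged usage sketch (added for illustration): building a config and reading back the ONNX
# export metadata defined above. Passing the model config positionally follows the base
# OnnxConfig constructor; the printed values restate the class attributes, nothing is downloaded.
if __name__ == "__main__":
    config = YolosConfig(num_detection_tokens=100)
    onnx_config = YolosOnnxConfig(config)
    print(config.image_size)                # [512, 864]
    print(dict(onnx_config.inputs))         # {'pixel_values': {0: 'batch', ...}}
    print(onnx_config.atol_for_validation)  # 0.0001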
| 337 | 0 |