import os
from itertools import chain
from random import randrange, shuffle
import pytest
from .sol1 import PokerHand
SORTED_HANDS = (
'''4S 3H 2C 7S 5H''',
'''9D 8H 2C 6S 7H''',
'''2D 6D 9D TH 7D''',
'''TC 8C 2S JH 6C''',
'''JH 8S TH AH QH''',
'''TS KS 5S 9S AC''',
'''KD 6S 9D TH AD''',
'''KS 8D 4D 9S 4S''', # pair
'''8C 4S KH JS 4D''', # pair
'''QH 8H KD JH 8S''', # pair
'''KC 4H KS 2H 8D''', # pair
'''KD 4S KC 3H 8S''', # pair
'''AH 8S AS KC JH''', # pair
'''3H 4C 4H 3S 2H''', # 2 pairs
'''5S 5D 2C KH KH''', # 2 pairs
'''3C KH 5D 5S KH''', # 2 pairs
'''AS 3C KH AD KH''', # 2 pairs
'''7C 7S 3S 7H 5S''', # 3 of a kind
'''7C 7S KH 2H 7H''', # 3 of a kind
'''AC KH QH AH AS''', # 3 of a kind
'''2H 4D 3C AS 5S''', # straight (low ace)
'''3C 5C 4C 2C 6H''', # straight
'''6S 8S 7S 5H 9H''', # straight
'''JS QS 9H TS KH''', # straight
'''QC KH TS JS AH''', # straight (high ace)
'''8C 9C 5C 3C TC''', # flush
'''3S 8S 9S 5S KS''', # flush
'''4C 5C 9C 8C KC''', # flush
'''JH 8H AH KH QH''', # flush
'''3D 2H 3H 2C 2D''', # full house
'''2H 2C 3S 3H 3D''', # full house
'''KH KC 3S 3H 3D''', # full house
'''JC 6H JS JD JH''', # 4 of a kind
'''JC 7H JS JD JH''', # 4 of a kind
'''JC KH JS JD JH''', # 4 of a kind
'''2S AS 4S 5S 3S''', # straight flush (low ace)
'''2D 6D 3D 4D 5D''', # straight flush
'''5C 6C 3C 7C 4C''', # straight flush
'''JH 9H TH KH QH''', # straight flush
'''JH AH TH KH QH''', # royal flush (high ace straight flush)
)
TEST_COMPARE = (
('''2H 3H 4H 5H 6H''', '''KS AS TS QS JS''', '''Loss'''),
('''2H 3H 4H 5H 6H''', '''AS AD AC AH JD''', '''Win'''),
('''AS AH 2H AD AC''', '''JS JD JC JH 3D''', '''Win'''),
('''2S AH 2H AS AC''', '''JS JD JC JH AD''', '''Loss'''),
('''2S AH 2H AS AC''', '''2H 3H 5H 6H 7H''', '''Win'''),
('''AS 3S 4S 8S 2S''', '''2H 3H 5H 6H 7H''', '''Win'''),
('''2H 3H 5H 6H 7H''', '''2S 3H 4H 5S 6C''', '''Win'''),
('''2S 3H 4H 5S 6C''', '''3D 4C 5H 6H 2S''', '''Tie'''),
('''2S 3H 4H 5S 6C''', '''AH AC 5H 6H AS''', '''Win'''),
('''2S 2H 4H 5S 4C''', '''AH AC 5H 6H AS''', '''Loss'''),
('''2S 2H 4H 5S 4C''', '''AH AC 5H 6H 7S''', '''Win'''),
('''6S AD 7H 4S AS''', '''AH AC 5H 6H 7S''', '''Loss'''),
('''2S AH 4H 5S KC''', '''AH AC 5H 6H 7S''', '''Loss'''),
('''2S 3H 6H 7S 9C''', '''7H 3C TH 6H 9S''', '''Loss'''),
('''4S 5H 6H TS AC''', '''3S 5H 6H TS AC''', '''Win'''),
('''2S AH 4H 5S 6C''', '''AD 4C 5H 6H 2C''', '''Tie'''),
('''AS AH 3H AD AC''', '''AS AH 2H AD AC''', '''Win'''),
('''AH AC 5H 5C QS''', '''AH AC 5H 5C KS''', '''Loss'''),
('''AH AC 5H 5C QS''', '''KH KC 5H 5C QS''', '''Win'''),
('''7C 7S KH 2H 7H''', '''3C 3S AH 2H 3H''', '''Win'''),
('''3C 3S AH 2H 3H''', '''7C 7S KH 2H 7H''', '''Loss'''),
('''6H 5H 4H 3H 2H''', '''5H 4H 3H 2H AH''', '''Win'''),
('''5H 4H 3H 2H AH''', '''5H 4H 3H 2H AH''', '''Tie'''),
('''5H 4H 3H 2H AH''', '''6H 5H 4H 3H 2H''', '''Loss'''),
('''AH AD KS KC AC''', '''AH KD KH AC KC''', '''Win'''),
('''2H 4D 3C AS 5S''', '''2H 4D 3C 6S 5S''', '''Loss'''),
('''2H 3S 3C 3H 2S''', '''3S 3C 2S 2H 2D''', '''Win'''),
('''4D 6D 5D 2D JH''', '''3S 8S 3H TC KH''', '''Loss'''),
('''4S 6C 8S 3S 7S''', '''AD KS 2D 7D 7C''', '''Loss'''),
('''6S 4C 7H 8C 3H''', '''5H JC AH 9D 9C''', '''Loss'''),
('''9D 9H JH TC QH''', '''3C 2S JS 5C 7H''', '''Win'''),
('''2H TC 8S AD 9S''', '''4H TS 7H 2C 5C''', '''Win'''),
('''9D 3S 2C 7S 7C''', '''JC TD 3C TC 9H''', '''Loss'''),
)
TEST_FLUSH = (
('''2H 3H 4H 5H 6H''', True),
('''AS AH 2H AD AC''', False),
('''2H 3H 5H 6H 7H''', True),
('''KS AS TS QS JS''', True),
('''8H 9H QS JS TH''', False),
('''AS 3S 4S 8S 2S''', True),
)
TEST_STRAIGHT = (
('''2H 3H 4H 5H 6H''', True),
('''AS AH 2H AD AC''', False),
('''2H 3H 5H 6H 7H''', False),
('''KS AS TS QS JS''', True),
('''8H 9H QS JS TH''', True),
)
TEST_FIVE_HIGH_STRAIGHT = (
    ("2H 4D 3C AS 5S", True, [5, 4, 3, 2, 14]),
    ("2H 5D 3C AS 5S", False, [14, 5, 5, 3, 2]),
    ("JH QD KC AS TS", False, [14, 13, 12, 11, 10]),
    ("9D 3S 2C 7S 7C", False, [9, 7, 7, 3, 2]),
)
TEST_KIND = (
('''JH AH TH KH QH''', 0),
('''JH 9H TH KH QH''', 0),
('''JC KH JS JD JH''', 7),
('''KH KC 3S 3H 3D''', 6),
('''8C 9C 5C 3C TC''', 0),
('''JS QS 9H TS KH''', 0),
('''7C 7S KH 2H 7H''', 3),
('''3C KH 5D 5S KH''', 2),
('''QH 8H KD JH 8S''', 1),
('''2D 6D 9D TH 7D''', 0),
)
TEST_TYPES = (
    ("JH AH TH KH QH", 23),
    ("JH 9H TH KH QH", 22),
    ("JC KH JS JD JH", 21),
    ("KH KC 3S 3H 3D", 20),
    ("8C 9C 5C 3C TC", 19),
    ("JS QS 9H TS KH", 18),
    ("7C 7S KH 2H 7H", 17),
    ("3C KH 5D 5S KH", 16),
    ("QH 8H KD JH 8S", 15),
    ("2D 6D 9D TH 7D", 14),
)
def generate_random_hand():
    """Pick two random sorted hands and the expected result of comparing them."""
    play, oppo = randrange(len(SORTED_HANDS)), randrange(len(SORTED_HANDS))
    expected = ["Loss", "Tie", "Win"][(play >= oppo) + (play > oppo)]
    hand, other = SORTED_HANDS[play], SORTED_HANDS[oppo]
    return hand, other, expected


def generate_random_hands(number_of_hands: int = 100):
    """Generate a stream of random hand comparisons for the parametrized tests."""
    return (generate_random_hand() for _ in range(number_of_hands))
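
# Index trick above: (play >= oppo) + (play > oppo) evaluates to 0 when play < oppo,
# 1 when the indices are equal, and 2 when play > oppo, selecting "Loss"/"Tie"/"Win"
# respectively, since SORTED_HANDS is ordered from weakest to strongest hand.
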
@pytest.mark.parametrize("hand, expected", TEST_FLUSH)
def test_hand_is_flush(hand, expected):
    assert PokerHand(hand)._is_flush() == expected


@pytest.mark.parametrize("hand, expected", TEST_STRAIGHT)
def test_hand_is_straight(hand, expected):
    assert PokerHand(hand)._is_straight() == expected


@pytest.mark.parametrize("hand, expected, card_values", TEST_FIVE_HIGH_STRAIGHT)
def test_hand_is_five_high_straight(hand, expected, card_values):
    player = PokerHand(hand)
    assert player._is_five_high_straight() == expected
    assert player._card_values == card_values


@pytest.mark.parametrize("hand, expected", TEST_KIND)
def test_hand_is_same_kind(hand, expected):
    assert PokerHand(hand)._is_same_kind() == expected


@pytest.mark.parametrize("hand, expected", TEST_TYPES)
def test_hand_values(hand, expected):
    assert PokerHand(hand)._hand_type == expected


@pytest.mark.parametrize("hand, other, expected", TEST_COMPARE)
def test_hand_compare(hand, other, expected):
    assert PokerHand(hand).compare_with(PokerHand(other)) == expected


@pytest.mark.parametrize("hand, other, expected", generate_random_hands())
def test_hand_compare_random(hand, other, expected):
    assert PokerHand(hand).compare_with(PokerHand(other)) == expected
def test_hand_sorted():
    poker_hands = [PokerHand(hand) for hand in SORTED_HANDS]
    list_copy = poker_hands.copy()
    shuffle(list_copy)
    user_sorted = chain(sorted(list_copy))
    for index, hand in enumerate(user_sorted):
        assert hand == poker_hands[index]


def test_custom_sort_five_high_straight():
    # Five-high straights must sort below six-high straights.
    pokerhands = [PokerHand("2D AC 3H 4H 5S"), PokerHand("2S 3H 4H 5S 6C")]
    pokerhands.sort(reverse=True)
    assert pokerhands[0].__str__() == "2S 3H 4H 5S 6C"


def test_multiple_calls_five_high_straight():
    # Repeated calls must not mutate the cached card values.
    pokerhand = PokerHand("2C 4S AS 3D 5C")
    expected = True
    expected_card_values = [5, 4, 3, 2, 14]
    for _ in range(10):
        assert pokerhand._is_five_high_straight() == expected
        assert pokerhand._card_values == expected_card_values


def test_euler_project():
    # Problem 54 from Project Euler: how many of the listed hands does Player 1 win?
    answer = 0
    script_dir = os.path.abspath(os.path.dirname(__file__))
    poker_hands_path = os.path.join(script_dir, "poker_hands.txt")
    with open(poker_hands_path) as file_hand:
        for line in file_hand:
            player_hand = line[:14].strip()
            opponent_hand = line[15:].strip()
            player, opponent = PokerHand(player_hand), PokerHand(opponent_hand)
            output = player.compare_with(opponent)
            if output == "Win":
                answer += 1
    assert answer == 376
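
# The tests above only exercise PokerHand's surface. A minimal sketch of the
# interface they assume (method names come from the tests themselves; the bodies
# are placeholders, not the original implementation):
#
#     class PokerHand:
#         def __init__(self, hand: str) -> None: ...
#         def compare_with(self, other: "PokerHand") -> str: ...  # "Win" | "Loss" | "Tie"
#         def _is_flush(self) -> bool: ...
#         def _is_straight(self) -> bool: ...
#         def _is_five_high_straight(self) -> bool: ...
#         def _is_same_kind(self) -> bool: ...
#         # plus `_hand_type`, `_card_values`, ordering (__lt__/__eq__), and __str__.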
from __future__ import annotations
import json
import requests
from bs4 import BeautifulSoup
from fake_useragent import UserAgent
headers = {"UserAgent": UserAgent().random}
def extract_user_profile(script) -> dict:
    """Parse the shared-data JSON embedded in an Instagram <script> tag."""
    data = script.contents[0]
    info = json.loads(data[data.find('{"config"') : -1])
    return info["entry_data"]["ProfilePage"][0]["graphql"]["user"]
class InstagramUser:
    def __init__(self, username):
        self.url = f"https://www.instagram.com/{username}/"
        self.user_data = self.get_json()

    def get_json(self) -> dict:
        """Fetch the profile page and return the embedded user data."""
        html = requests.get(self.url, headers=headers).text
        scripts = BeautifulSoup(html, "html.parser").find_all("script")
        try:
            return extract_user_profile(scripts[4])
        except (json.decoder.JSONDecodeError, KeyError):
            return extract_user_profile(scripts[3])

    def __repr__(self) -> str:
        return f"{self.__class__.__name__}('{self.username}')"

    def __str__(self) -> str:
        return f"{self.fullname} ({self.username}) is {self.biography}"
    @property
    def username(self) -> str:
        return self.user_data["username"]

    @property
    def fullname(self) -> str:
        return self.user_data["full_name"]

    @property
    def biography(self) -> str:
        return self.user_data["biography"]

    @property
    def email(self) -> str:
        return self.user_data["business_email"]

    @property
    def website(self) -> str:
        return self.user_data["external_url"]

    @property
    def number_of_followers(self) -> int:
        return self.user_data["edge_followed_by"]["count"]

    @property
    def number_of_followings(self) -> int:
        return self.user_data["edge_follow"]["count"]

    @property
    def number_of_posts(self) -> int:
        return self.user_data["edge_owner_to_timeline_media"]["count"]

    @property
    def profile_picture_url(self) -> str:
        return self.user_data["profile_pic_url_hd"]

    @property
    def is_verified(self) -> bool:
        return self.user_data["is_verified"]

    @property
    def is_private(self) -> bool:
        return self.user_data["is_private"]
def _lowerCamelCase( lowercase__ = "github" ) -> None:
'''simple docstring'''
import os
if os.environ.get('CI' ):
return # test failing on GitHub Actions
__lowercase= InstagramUser(lowercase__ )
assert instagram_user.user_data
assert isinstance(instagram_user.user_data , lowercase__ )
assert instagram_user.username == username
if username != "github":
return
assert instagram_user.fullname == "GitHub"
assert instagram_user.biography == "Built for developers."
assert instagram_user.number_of_posts > 1_5_0
assert instagram_user.number_of_followers > 1_2_0_0_0_0
assert instagram_user.number_of_followings > 1_5
assert instagram_user.email == "support@github.com"
assert instagram_user.website == "https://github.com/readme"
assert instagram_user.profile_picture_url.startswith('https://instagram.' )
assert instagram_user.is_verified is True
assert instagram_user.is_private is False
if __name__ == "__main__":
import doctest
doctest.testmod()
lowerCAmelCase = InstagramUser('''github''')
print(instagram_user)
print(F'{instagram_user.number_of_posts = }')
print(F'{instagram_user.number_of_followers = }')
print(F'{instagram_user.number_of_followings = }')
print(F'{instagram_user.email = }')
print(F'{instagram_user.website = }')
print(F'{instagram_user.profile_picture_url = }')
print(F'{instagram_user.is_verified = }')
print(F'{instagram_user.is_private = }')
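
# Instagram's markup changes often, so the scripts[4]/scripts[3] fallback in
# get_json() is brittle. A defensive wrapper (sketch only; `RequestException`
# is a standard `requests` class, the retry policy itself is an assumption):
#
#     import time
#
#     def fetch_user(username, retries=3):
#         for attempt in range(retries):
#             try:
#                 return InstagramUser(username)
#             except (requests.RequestException, IndexError, KeyError):
#                 time.sleep(2**attempt)  # exponential backoff between attempts
#         return None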
import gc
import unittest
import numpy as np
import torch
import torch.nn.functional as F
from transformers import (
ClapTextConfig,
ClapTextModelWithProjection,
RobertaTokenizer,
    SpeechT5HifiGan,
    SpeechT5HifiGanConfig,
)
from diffusers import (
AudioLDMPipeline,
AutoencoderKL,
DDIMScheduler,
LMSDiscreteScheduler,
PNDMScheduler,
    UNet2DConditionModel,
)
from diffusers.utils import is_xformers_available, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism
from ..pipeline_params import TEXT_TO_AUDIO_BATCH_PARAMS, TEXT_TO_AUDIO_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class AudioLDMPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = AudioLDMPipeline
    params = TEXT_TO_AUDIO_PARAMS
    batch_params = TEXT_TO_AUDIO_BATCH_PARAMS
    required_optional_params = frozenset(
        [
            "num_inference_steps",
            "num_waveforms_per_prompt",
            "generator",
            "latents",
            "output_type",
            "return_dict",
            "callback",
            "callback_steps",
        ]
    )
    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            block_out_channels=(32, 64), layers_per_block=2, sample_size=32, in_channels=4, out_channels=4,
            down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"),
            up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"), cross_attention_dim=(32, 64),
            class_embed_type="simple_projection", projection_class_embeddings_input_dim=32,
            class_embeddings_concat=True,
        )
        scheduler = DDIMScheduler(
            beta_start=0.00085, beta_end=0.012, beta_schedule="scaled_linear",
            clip_sample=False, set_alpha_to_one=False,
        )
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64], in_channels=1, out_channels=1,
            down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"],
            up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"], latent_channels=4,
        )
        torch.manual_seed(0)
        text_encoder_config = ClapTextConfig(
            bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=37, layer_norm_eps=1e-05,
            num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000, projection_dim=32,
        )
        text_encoder = ClapTextModelWithProjection(text_encoder_config)
        tokenizer = RobertaTokenizer.from_pretrained("hf-internal-testing/tiny-random-roberta", model_max_length=77)
        vocoder_config = SpeechT5HifiGanConfig(
            model_in_dim=8, sampling_rate=16000, upsample_initial_channel=16, upsample_rates=[2, 2],
            upsample_kernel_sizes=[4, 4], resblock_kernel_sizes=[3, 7],
            resblock_dilation_sizes=[[1, 3, 5], [1, 3, 5]], normalize_before=False,
        )
        vocoder = SpeechT5HifiGan(vocoder_config)
        components = {
            "unet": unet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "vocoder": vocoder,
        }
        return components
    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "A hammer hitting a wooden surface",
            "generator": generator,
            "num_inference_steps": 2,
            "guidance_scale": 6.0,
        }
        return inputs
    def test_audioldm_ddim(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        audioldm_pipe = AudioLDMPipeline(**components)
        audioldm_pipe = audioldm_pipe.to(device)
        audioldm_pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs(device)
        output = audioldm_pipe(**inputs)
        audio = output.audios[0]
        assert audio.ndim == 1
        assert len(audio) == 256
        audio_slice = audio[:10]
        expected_slice = np.array(
            [-0.0050, 0.0050, -0.0060, 0.0033, -0.0026, 0.0033, -0.0027, 0.0033, -0.0028, 0.0033]
        )
        assert np.abs(audio_slice - expected_slice).max() < 1e-2
    def test_audioldm_prompt_embeds(self):
        components = self.get_dummy_components()
        audioldm_pipe = AudioLDMPipeline(**components)
        audioldm_pipe = audioldm_pipe.to(torch_device)
        audioldm_pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs(torch_device)
        inputs["prompt"] = 3 * [inputs["prompt"]]
        # forward
        output = audioldm_pipe(**inputs)
        audio_1 = output.audios[0]
        inputs = self.get_dummy_inputs(torch_device)
        prompt = 3 * [inputs.pop("prompt")]
        text_inputs = audioldm_pipe.tokenizer(
            prompt, padding="max_length", max_length=audioldm_pipe.tokenizer.model_max_length,
            truncation=True, return_tensors="pt",
        )
        text_inputs = text_inputs["input_ids"].to(torch_device)
        prompt_embeds = audioldm_pipe.text_encoder(text_inputs)
        prompt_embeds = prompt_embeds.text_embeds
        # additional L_2 normalization over each hidden-state
        prompt_embeds = F.normalize(prompt_embeds, dim=-1)
        inputs["prompt_embeds"] = prompt_embeds
        # forward
        output = audioldm_pipe(**inputs)
        audio_2 = output.audios[0]
        assert np.abs(audio_1 - audio_2).max() < 1e-2
    def test_audioldm_negative_prompt_embeds(self):
        components = self.get_dummy_components()
        audioldm_pipe = AudioLDMPipeline(**components)
        audioldm_pipe = audioldm_pipe.to(torch_device)
        audioldm_pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs(torch_device)
        negative_prompt = 3 * ["this is a negative prompt"]
        inputs["negative_prompt"] = negative_prompt
        inputs["prompt"] = 3 * [inputs["prompt"]]
        # forward
        output = audioldm_pipe(**inputs)
        audio_1 = output.audios[0]
        inputs = self.get_dummy_inputs(torch_device)
        prompt = 3 * [inputs.pop("prompt")]
        embeds = []
        for p in [prompt, negative_prompt]:
            text_inputs = audioldm_pipe.tokenizer(
                p, padding="max_length", max_length=audioldm_pipe.tokenizer.model_max_length,
                truncation=True, return_tensors="pt",
            )
            text_inputs = text_inputs["input_ids"].to(torch_device)
            text_embeds = audioldm_pipe.text_encoder(text_inputs)
            text_embeds = text_embeds.text_embeds
            # additional L_2 normalization over each hidden-state
            text_embeds = F.normalize(text_embeds, dim=-1)
            embeds.append(text_embeds)
        inputs["prompt_embeds"], inputs["negative_prompt_embeds"] = embeds
        # forward
        output = audioldm_pipe(**inputs)
        audio_2 = output.audios[0]
        assert np.abs(audio_1 - audio_2).max() < 1e-2
    def test_audioldm_negative_prompt(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        components["scheduler"] = PNDMScheduler(skip_prk_steps=True)
        audioldm_pipe = AudioLDMPipeline(**components)
        audioldm_pipe = audioldm_pipe.to(device)
        audioldm_pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs(device)
        negative_prompt = "egg cracking"
        output = audioldm_pipe(**inputs, negative_prompt=negative_prompt)
        audio = output.audios[0]
        assert audio.ndim == 1
        assert len(audio) == 256
        audio_slice = audio[:10]
        expected_slice = np.array(
            [-0.0051, 0.0050, -0.0060, 0.0034, -0.0026, 0.0033, -0.0027, 0.0033, -0.0028, 0.0032]
        )
        assert np.abs(audio_slice - expected_slice).max() < 1e-2
    def test_audioldm_num_waveforms_per_prompt(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        components["scheduler"] = PNDMScheduler(skip_prk_steps=True)
        audioldm_pipe = AudioLDMPipeline(**components)
        audioldm_pipe = audioldm_pipe.to(device)
        audioldm_pipe.set_progress_bar_config(disable=None)
        prompt = "A hammer hitting a wooden surface"
        # test num_waveforms_per_prompt=1 (default)
        audios = audioldm_pipe(prompt, num_inference_steps=2).audios
        assert audios.shape == (1, 256)
        # test num_waveforms_per_prompt=1 (default) for batch of prompts
        batch_size = 2
        audios = audioldm_pipe([prompt] * batch_size, num_inference_steps=2).audios
        assert audios.shape == (batch_size, 256)
        # test num_waveforms_per_prompt for single prompt
        num_waveforms_per_prompt = 2
        audios = audioldm_pipe(prompt, num_inference_steps=2, num_waveforms_per_prompt=num_waveforms_per_prompt).audios
        assert audios.shape == (num_waveforms_per_prompt, 256)
        # test num_waveforms_per_prompt for batch of prompts
        batch_size = 2
        audios = audioldm_pipe(
            [prompt] * batch_size, num_inference_steps=2, num_waveforms_per_prompt=num_waveforms_per_prompt
        ).audios
        assert audios.shape == (batch_size * num_waveforms_per_prompt, 256)
    def test_audioldm_audio_length_in_s(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        audioldm_pipe = AudioLDMPipeline(**components)
        audioldm_pipe = audioldm_pipe.to(device)
        audioldm_pipe.set_progress_bar_config(disable=None)
        vocoder_sampling_rate = audioldm_pipe.vocoder.config.sampling_rate
        inputs = self.get_dummy_inputs(device)
        output = audioldm_pipe(audio_length_in_s=0.016, **inputs)
        audio = output.audios[0]
        assert audio.ndim == 1
        assert len(audio) / vocoder_sampling_rate == 0.016
        output = audioldm_pipe(audio_length_in_s=0.032, **inputs)
        audio = output.audios[0]
        assert audio.ndim == 1
        assert len(audio) / vocoder_sampling_rate == 0.032
    def test_audioldm_vocoder_model_in_dim(self):
        components = self.get_dummy_components()
        audioldm_pipe = AudioLDMPipeline(**components)
        audioldm_pipe = audioldm_pipe.to(torch_device)
        audioldm_pipe.set_progress_bar_config(disable=None)
        prompt = ["hey"]
        output = audioldm_pipe(prompt, num_inference_steps=1)
        audio_shape = output.audios.shape
        assert audio_shape == (1, 256)
        config = audioldm_pipe.vocoder.config
        config.model_in_dim *= 2
        audioldm_pipe.vocoder = SpeechT5HifiGan(config).to(torch_device)
        output = audioldm_pipe(prompt, num_inference_steps=1)
        audio_shape = output.audios.shape
        # waveform shape is unchanged, we just have 2x the number of mel channels in the spectrogram
        assert audio_shape == (1, 256)
    def test_attention_slicing_forward_pass(self):
        self._test_attention_slicing_forward_pass(test_mean_pixel_difference=False)

    def test_inference_batch_single_identical(self):
        self._test_inference_batch_single_identical(test_mean_pixel_difference=False)

    @unittest.skipIf(
        torch_device != "cuda" or not is_xformers_available(),
        reason="XFormers attention is only available with CUDA and `xformers` installed",
    )
    def test_xformers_attention_forwardGenerator_pass(self):
        self._test_xformers_attention_forwardGenerator_pass(test_mean_pixel_difference=False)
@slow
class AudioLDMPipelineSlowTests(unittest.TestCase):
    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def get_inputs(self, device, generator_device="cpu", dtype=torch.float32, seed=0):
        generator = torch.Generator(device=generator_device).manual_seed(seed)
        latents = np.random.RandomState(seed).standard_normal((1, 8, 128, 16))
        latents = torch.from_numpy(latents).to(device=device, dtype=dtype)
        inputs = {
            "prompt": "A hammer hitting a wooden surface",
            "latents": latents,
            "generator": generator,
            "num_inference_steps": 3,
            "guidance_scale": 2.5,
        }
        return inputs
    def test_audioldm(self):
        audioldm_pipe = AudioLDMPipeline.from_pretrained("cvssp/audioldm")
        audioldm_pipe = audioldm_pipe.to(torch_device)
        audioldm_pipe.set_progress_bar_config(disable=None)
        inputs = self.get_inputs(torch_device)
        inputs["num_inference_steps"] = 25
        audio = audioldm_pipe(**inputs).audios[0]
        assert audio.ndim == 1
        assert len(audio) == 81920
        audio_slice = audio[77230:77240]
        expected_slice = np.array(
            [-0.4884, -0.4607, 0.0023, 0.5007, 0.5896, 0.5151, 0.3813, -0.0208, -0.3687, -0.4315]
        )
        max_diff = np.abs(expected_slice - audio_slice).max()
        assert max_diff < 1e-2
    def test_audioldm_lms(self):
        audioldm_pipe = AudioLDMPipeline.from_pretrained("cvssp/audioldm")
        audioldm_pipe.scheduler = LMSDiscreteScheduler.from_config(audioldm_pipe.scheduler.config)
        audioldm_pipe = audioldm_pipe.to(torch_device)
        audioldm_pipe.set_progress_bar_config(disable=None)
        inputs = self.get_inputs(torch_device)
        audio = audioldm_pipe(**inputs).audios[0]
        assert audio.ndim == 1
        assert len(audio) == 81920
        audio_slice = audio[27780:27790]
        expected_slice = np.array([-0.2131, -0.0873, -0.0124, -0.0189, 0.0569, 0.1373, 0.1883, 0.2886, 0.3297, 0.2212])
        max_diff = np.abs(expected_slice - audio_slice).max()
        assert max_diff < 3e-2
from typing import Any

import numpy as np


def is_hermitian(matrix: np.ndarray) -> bool:
    """Checks whether a matrix equals its own conjugate transpose."""
    return np.array_equal(matrix, matrix.conjugate().T)


def rayleigh_quotient(a: np.ndarray, v: np.ndarray) -> Any:
    """Returns the Rayleigh quotient of Hermitian matrix `a` and vector `v`."""
    v_star = v.conjugate().T
    v_star_dot = v_star.dot(a)
    assert isinstance(v_star_dot, np.ndarray)
    return (v_star_dot.dot(v)) / (v_star.dot(v))
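
# For a Hermitian matrix M and a non-zero vector v, the Rayleigh quotient is
#     R(M, v) = (v* M v) / (v* v)
# where v* is the conjugate transpose of v. Because M is Hermitian, R(M, v) is
# always real and lies between the smallest and largest eigenvalues of M.
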
def tests() -> None:
    a = np.array([[2, 2 + 1j, 4], [2 - 1j, 3, 1j], [4, -1j, 1]])
    v = np.array([[1], [2], [3]])
    assert is_hermitian(a), f"{a} is not hermitian."
    print(rayleigh_quotient(a, v))

    a = np.array([[1, 2, 4], [2, 3, -1], [4, -1, 1]])
    assert is_hermitian(a), f"{a} is not hermitian."
    assert rayleigh_quotient(a, v) == float(3)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    tests()
import string

import numpy


def greatest_common_divisor(a: int, b: int) -> int:
    return b if a == 0 else greatest_common_divisor(b % a, a)
class HillCipher:
    key_string = string.ascii_uppercase + string.digits
    # This cipher takes alphanumerics into account
    # i.e. a total of 36 characters

    # take x and return x % len(key_string)
    modulus = numpy.vectorize(lambda x: x % 36)
    to_int = numpy.vectorize(round)

    def __init__(self, encrypt_key):
        self.encrypt_key = self.modulus(encrypt_key)  # mod36 calc's on the encrypt key
        self.check_determinant()  # validate the determinant of the encryption key
        self.break_key = encrypt_key.shape[0]

    def replace_letters(self, letter: str) -> int:
        return self.key_string.index(letter)

    def replace_digits(self, num: int) -> str:
        return self.key_string[round(num)]

    def check_determinant(self) -> None:
        det = round(numpy.linalg.det(self.encrypt_key))
        if det < 0:
            det = det % len(self.key_string)
        req_l = len(self.key_string)
        if greatest_common_divisor(det, len(self.key_string)) != 1:
            raise ValueError(
                f"determinant modular {req_l} of encryption key({det}) "
                f"is not co prime w.r.t {req_l}.\nTry another key."
            )
    def process_text(self, text: str) -> str:
        chars = [char for char in text.upper() if char in self.key_string]
        last = chars[-1]
        while len(chars) % self.break_key != 0:
            chars.append(last)
        return "".join(chars)

    def encrypt(self, text: str) -> str:
        text = self.process_text(text.upper())
        encrypted = ""
        for i in range(0, len(text) - self.break_key + 1, self.break_key):
            batch = text[i : i + self.break_key]
            vec = [self.replace_letters(char) for char in batch]
            batch_vec = numpy.array([vec]).T
            batch_encrypted = self.modulus(self.encrypt_key.dot(batch_vec)).T.tolist()[0]
            encrypted_batch = "".join(self.replace_digits(num) for num in batch_encrypted)
            encrypted += encrypted_batch
        return encrypted
    def make_decrypt_key(self):
        det = round(numpy.linalg.det(self.encrypt_key))
        if det < 0:
            det = det % len(self.key_string)
        det_inv = None
        for i in range(len(self.key_string)):
            if (det * i) % len(self.key_string) == 1:
                det_inv = i
                break
        inv_key = det_inv * numpy.linalg.det(self.encrypt_key) * numpy.linalg.inv(self.encrypt_key)
        return self.to_int(self.modulus(inv_key))

    def decrypt(self, text: str) -> str:
        decrypt_key = self.make_decrypt_key()
        text = self.process_text(text.upper())
        decrypted = ""
        for i in range(0, len(text) - self.break_key + 1, self.break_key):
            batch = text[i : i + self.break_key]
            vec = [self.replace_letters(char) for char in batch]
            batch_vec = numpy.array([vec]).T
            batch_decrypted = self.modulus(decrypt_key.dot(batch_vec)).T.tolist()[0]
            decrypted_batch = "".join(self.replace_digits(num) for num in batch_decrypted)
            decrypted += decrypted_batch
        return decrypted
def main() -> None:
    n = int(input("Enter the order of the encryption key: "))
    hill_matrix = []
    print("Enter each row of the encryption key with space separated integers")
    for _ in range(n):
        row = [int(x) for x in input().split()]
        hill_matrix.append(row)
    hc = HillCipher(numpy.array(hill_matrix))
    print("Would you like to encrypt or decrypt some text? (1 or 2)")
    option = input("\n1. Encrypt\n2. Decrypt\n")
    if option == "1":
        text_e = input("What text would you like to encrypt?: ")
        print("Your encrypted text is:")
        print(hc.encrypt(text_e))
    elif option == "2":
        text_d = input("What text would you like to decrypt?: ")
        print("Your decrypted text is:")
        print(hc.decrypt(text_d))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    main()
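
# A quick self-check of the cipher (sketch; the key below is illustrative, chosen
# only because its determinant is coprime with 36 and so passes check_determinant()):
#
#     cipher = HillCipher(numpy.array([[3, 5], [7, 8]]))
#     token = cipher.encrypt("HELLO")  # input is padded to a multiple of break_key
#     assert cipher.decrypt(token).startswith("HELLO")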
from math import ceil
from typing import List, Optional, Union
import numpy as np
from ...audio_utils import mel_filter_bank, spectrogram, window_function
from ...feature_extraction_sequence_utils import BatchFeature, SequenceFeatureExtractor
from ...utils import TensorType, logging
logger = logging.get_logger(__name__)


class TvltFeatureExtractor(SequenceFeatureExtractor):
    model_input_names = ["audio_values", "audio_mask"]

    def __init__(self, spectrogram_length=2048, num_channels=1, patch_size=[16, 16], feature_size=128,
                 sampling_rate=44100, hop_length_to_sampling_rate=86, n_fft=2048, padding_value=0.0, **kwargs):
        super().__init__(feature_size=feature_size, sampling_rate=sampling_rate, padding_value=padding_value, **kwargs)
        self.spectrogram_length = spectrogram_length
        self.num_channels = num_channels
        self.patch_size = patch_size
        self.freq_len = feature_size // self.patch_size[1]
        self.n_fft = n_fft
        self.hop_length = sampling_rate // hop_length_to_sampling_rate
        self.sampling_rate = sampling_rate
        self.padding_value = padding_value
        self.mel_filters = mel_filter_bank(
            num_frequency_bins=1 + n_fft // 2, num_mel_filters=feature_size, min_frequency=0.0,
            max_frequency=22050.0, sampling_rate=sampling_rate, norm="slaney", mel_scale="slaney",
        ).T
    def _np_extract_fbank_features(self, waveform: np.ndarray) -> np.ndarray:
        log_spec = spectrogram(
            waveform, window_function(self.n_fft, "hann"), frame_length=self.n_fft,
            hop_length=self.hop_length, power=2.0, mel_filters=self.mel_filters.T,
            log_mel="dB", db_range=80.0,
        )
        log_spec = log_spec[:, :-1]
        log_spec = log_spec - 20.0
        log_spec = np.clip(log_spec / 40.0, -2.0, 0.0) + 1.0
        return log_spec
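    # Normalization note: with log_mel="dB" and db_range=80.0, `spectrogram` returns
    # values within 80 dB of the peak. Subtracting 20 and dividing by 40, then
    # clipping to [-2, 0] and adding 1, maps the log-mel features into [-1, 1].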
    def __call__(self, raw_speech, return_tensors=None, return_attention_mask=True, sampling_rate=None,
                 resample=False, mask_audio=False, **kwargs) -> BatchFeature:
        if sampling_rate is not None:
            if sampling_rate != self.sampling_rate:
                raise ValueError(
                    "This feature extractor is set to support sampling rate"
                    f" of {self.sampling_rate}. Please make sure that the provided `raw_speech` input was sampled"
                    f" with {self.sampling_rate} and not {sampling_rate}."
                )
        else:
            logger.warning(
                "It is strongly recommended to pass the `sampling_rate` argument to this function. "
                "Failing to do so can result in silent errors that might be hard to debug."
            )
        is_batched_numpy = isinstance(raw_speech, np.ndarray) and len(raw_speech.shape) > 1
        if is_batched_numpy and len(raw_speech.shape) > 2:
            raise ValueError(f"Only mono-channel audio is supported for input to {self}")
        is_batched = is_batched_numpy or (
            isinstance(raw_speech, (list, tuple)) and (isinstance(raw_speech[0], (np.ndarray, tuple, list)))
        )
        if is_batched:
            raw_speech = [np.asarray([speech], dtype=np.float32).T for speech in raw_speech]
        elif not is_batched and not isinstance(raw_speech, np.ndarray):
            raw_speech = np.asarray(raw_speech, dtype=np.float32)
        elif isinstance(raw_speech, np.ndarray) and raw_speech.dtype is np.dtype(np.float64):
            raw_speech = raw_speech.astype(np.float32)
        # always return batch
        if not is_batched:
            raw_speech = [np.asarray([raw_speech]).T]
        # Convert audio signals to log mel spectrograms, truncate by time axis
        audio_features = [
            self._np_extract_fbank_features(waveform.squeeze()).T[: self.spectrogram_length] for waveform in raw_speech
        ]
        if isinstance(audio_features[0], list):
            audio_features = [np.asarray(feature, dtype=np.float32) for feature in audio_features]
        # Create audio attention mask
        max_patch_len = max(
            [ceil(feature.shape[0] / self.patch_size[0]) * self.freq_len for feature in audio_features]
        )  # The maximum number of audio patches in a batch
        if return_attention_mask:
            audio_mask = [
                (ceil(feature.shape[0] / self.patch_size[0]) * self.freq_len) * [1]
                + (max_patch_len - ceil(feature.shape[0] / self.patch_size[0]) * self.freq_len) * [0]
                for feature in audio_features
            ]
            audio_mask = np.array(audio_mask).astype(np.float32)
        # convert into correct format for padding
        max_time_len = max_patch_len // self.freq_len * self.patch_size[0]  # The maximum audio size in a batch
        padded_audio_features = np.ones([len(audio_features), 1, max_time_len, self.feature_size]).astype(np.float32)
        padded_audio_features = padded_audio_features * self.padding_value
        for i in range(len(audio_features)):
            feature = audio_features[i]
            padded_audio_features[i, :, : feature.shape[0], :] = feature
        # return as BatchFeature
        if return_attention_mask:
            data = {"audio_values": padded_audio_features, "audio_mask": audio_mask}
        else:
            data = {"audio_values": padded_audio_features}
        encoded_inputs = BatchFeature(data=data, tensor_type=return_tensors)
        return encoded_inputs
import itertools
import random
import unittest
import numpy as np
from transformers import is_speech_available
from transformers.testing_utils import require_torch, require_torchaudio
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
if is_speech_available():
    from transformers import Speech2TextFeatureExtractor

global_rng = random.Random()
def floats_list(shape, scale=1.0, rng=None, name=None):
    """Creates a random float32 tensor as nested Python lists."""
    if rng is None:
        rng = global_rng
    values = []
    for batch_idx in range(shape[0]):
        values.append([])
        for _ in range(shape[1]):
            values[-1].append(rng.random() * scale)
    return values
@require_torch
@require_torchaudio
class Speech2TextFeatureExtractionTester(unittest.TestCase):
    def __init__(self, parent, batch_size=7, min_seq_length=400, max_seq_length=2000, feature_size=24,
                 num_mel_bins=24, padding_value=0.0, sampling_rate=16000, return_attention_mask=True,
                 do_normalize=True):
        self.parent = parent
        self.batch_size = batch_size
        self.min_seq_length = min_seq_length
        self.max_seq_length = max_seq_length
        self.seq_length_diff = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
        self.feature_size = feature_size
        self.num_mel_bins = num_mel_bins
        self.padding_value = padding_value
        self.sampling_rate = sampling_rate
        self.return_attention_mask = return_attention_mask
        self.do_normalize = do_normalize

    def prepare_feat_extract_dict(self):
        return {
            "feature_size": self.feature_size,
            "num_mel_bins": self.num_mel_bins,
            "padding_value": self.padding_value,
            "sampling_rate": self.sampling_rate,
            "return_attention_mask": self.return_attention_mask,
            "do_normalize": self.do_normalize,
        }

    def prepare_inputs_for_common(self, equal_length=False, numpify=False):
        def _flatten(list_of_lists):
            return list(itertools.chain(*list_of_lists))

        if equal_length:
            speech_inputs = [floats_list((self.max_seq_length, self.feature_size)) for _ in range(self.batch_size)]
        else:
            # make sure that inputs increase in size
            speech_inputs = [
                floats_list((x, self.feature_size))
                for x in range(self.min_seq_length, self.max_seq_length, self.seq_length_diff)
            ]
        if numpify:
            speech_inputs = [np.asarray(x) for x in speech_inputs]
        return speech_inputs
@require_torch
@require_torchaudio
class Speech2TextFeatureExtractionTest(SequenceFeatureExtractionTestMixin, unittest.TestCase):
    feature_extraction_class = Speech2TextFeatureExtractor if is_speech_available() else None

    def setUp(self):
        self.feat_extract_tester = Speech2TextFeatureExtractionTester(self)

    def _check_zero_mean_unit_variance(self, input_vector):
        self.assertTrue(np.all(np.mean(input_vector, axis=0) < 1e-3))
        self.assertTrue(np.all(np.abs(np.var(input_vector, axis=0) - 1) < 1e-3))
    def test_call(self):
        feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        # create three inputs of length 800, 1000, and 1200
        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]
        np_speech_inputs = [np.asarray(speech_input) for speech_input in speech_inputs]
        # Test feature size
        input_features = feature_extractor(np_speech_inputs, padding=True, return_tensors="np").input_features
        self.assertTrue(input_features.ndim == 3)
        self.assertTrue(input_features.shape[-1] == feature_extractor.feature_size)
        # Test not batched input
        encoded_sequences_1 = feature_extractor(speech_inputs[0], return_tensors="np").input_features
        encoded_sequences_2 = feature_extractor(np_speech_inputs[0], return_tensors="np").input_features
        self.assertTrue(np.allclose(encoded_sequences_1, encoded_sequences_2, atol=1e-3))
        # Test batched
        encoded_sequences_1 = feature_extractor(speech_inputs, return_tensors="np").input_features
        encoded_sequences_2 = feature_extractor(np_speech_inputs, return_tensors="np").input_features
        for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1, encoded_sequences_2):
            self.assertTrue(np.allclose(enc_seq_1, enc_seq_2, atol=1e-3))
        # Test 2-D numpy arrays are batched.
        speech_inputs = [floats_list((1, x))[0] for x in (800, 800, 800)]
        np_speech_inputs = np.asarray(speech_inputs)
        encoded_sequences_1 = feature_extractor(speech_inputs, return_tensors="np").input_features
        encoded_sequences_2 = feature_extractor(np_speech_inputs, return_tensors="np").input_features
        for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1, encoded_sequences_2):
            self.assertTrue(np.allclose(enc_seq_1, enc_seq_2, atol=1e-3))
    def test_cepstral_mean_and_variance_normalization(self):
        feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]
        paddings = ["longest", "max_length", "do_not_pad"]
        max_lengths = [None, 16, None]
        for max_length, padding in zip(max_lengths, paddings):
            inputs = feature_extractor(
                speech_inputs, padding=padding, max_length=max_length, return_attention_mask=True
            )
            input_features = inputs.input_features
            attention_mask = inputs.attention_mask
            fbank_feat_lengths = [np.sum(x) for x in attention_mask]
            self._check_zero_mean_unit_variance(input_features[0][: fbank_feat_lengths[0]])
            self._check_zero_mean_unit_variance(input_features[1][: fbank_feat_lengths[1]])
            self._check_zero_mean_unit_variance(input_features[2][: fbank_feat_lengths[2]])
    def test_cepstral_mean_and_variance_normalization_np(self):
        feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]
        paddings = ["longest", "max_length", "do_not_pad"]
        max_lengths = [None, 16, None]
        for max_length, padding in zip(max_lengths, paddings):
            inputs = feature_extractor(
                speech_inputs, max_length=max_length, padding=padding, return_tensors="np", return_attention_mask=True
            )
            input_features = inputs.input_features
            attention_mask = inputs.attention_mask
            fbank_feat_lengths = [np.sum(x) for x in attention_mask]
            self._check_zero_mean_unit_variance(input_features[0][: fbank_feat_lengths[0]])
            self.assertTrue(input_features[0][fbank_feat_lengths[0] :].sum() < 1e-6)
            self._check_zero_mean_unit_variance(input_features[1][: fbank_feat_lengths[1]])
            self.assertTrue(input_features[0][fbank_feat_lengths[1] :].sum() < 1e-6)
            self._check_zero_mean_unit_variance(input_features[2][: fbank_feat_lengths[2]])
    def test_cepstral_mean_and_variance_normalization_trunc_max_length(self):
        feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]
        inputs = feature_extractor(
            speech_inputs, padding="max_length", max_length=4, truncation=True,
            return_tensors="np", return_attention_mask=True,
        )
        input_features = inputs.input_features
        attention_mask = inputs.attention_mask
        fbank_feat_lengths = np.sum(attention_mask == 1, axis=1)
        self._check_zero_mean_unit_variance(input_features[0, : fbank_feat_lengths[0]])
        self._check_zero_mean_unit_variance(input_features[1])
        self._check_zero_mean_unit_variance(input_features[2])
    def test_cepstral_mean_and_variance_normalization_trunc_longest(self):
        feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]
        inputs = feature_extractor(
            speech_inputs, padding="longest", max_length=4, truncation=True,
            return_tensors="np", return_attention_mask=True,
        )
        input_features = inputs.input_features
        attention_mask = inputs.attention_mask
        fbank_feat_lengths = np.sum(attention_mask == 1, axis=1)
        self._check_zero_mean_unit_variance(input_features[0, : fbank_feat_lengths[0]])
        self._check_zero_mean_unit_variance(input_features[1, : fbank_feat_lengths[1]])
        self._check_zero_mean_unit_variance(input_features[2])
        # make sure that if max_length < longest -> then pad to max_length
        self.assertEqual(input_features.shape, (3, 4, 24))

        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]
        inputs = feature_extractor(
            speech_inputs, padding="longest", max_length=16, truncation=True,
            return_tensors="np", return_attention_mask=True,
        )
        input_features = inputs.input_features
        attention_mask = inputs.attention_mask
        fbank_feat_lengths = np.sum(attention_mask == 1, axis=1)
        self._check_zero_mean_unit_variance(input_features[0, : fbank_feat_lengths[0]])
        self._check_zero_mean_unit_variance(input_features[1, : fbank_feat_lengths[1]])
        self._check_zero_mean_unit_variance(input_features[2])
        # make sure that if max_length < longest -> then pad to max_length
        self.assertEqual(input_features.shape, (3, 6, 24))
    def test_double_precision_pad(self):
        import torch

        feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        np_speech_inputs = np.random.rand(100, 32).astype(np.float64)
        py_speech_inputs = np_speech_inputs.tolist()
        for inputs in [py_speech_inputs, np_speech_inputs]:
            np_processed = feature_extractor.pad([{"input_features": inputs}], return_tensors="np")
            self.assertTrue(np_processed.input_features.dtype == np.float32)
            pt_processed = feature_extractor.pad([{"input_features": inputs}], return_tensors="pt")
            self.assertTrue(pt_processed.input_features.dtype == torch.float32)
    def _load_datasamples(self, num_samples):
        from datasets import load_dataset

        ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation")
        # automatic decoding with librispeech
        speech_samples = ds.sort("id").select(range(num_samples))[:num_samples]["audio"]
        return [x["array"] for x in speech_samples]
    def test_integration(self):
        # fmt: off
        expected = np.array([
            -1.5745, -1.7713, -1.7020, -1.6069, -1.2250, -1.1105, -0.9072, -0.8241,
            -1.2310, -0.8098, -0.3320, -0.4101, -0.7985, -0.4996, -0.8213, -0.9128,
            -1.0420, -1.1286, -1.0440, -0.7999, -0.8405, -1.2275, -1.5443, -1.4625,
        ])
        # fmt: on
        # NOTE: this copy of the expected array appears truncated (24 of the 30
        # values compared below); the slice is left at [:30] as in the source.
        input_speech = self._load_datasamples(1)
        feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        input_features = feature_extractor(input_speech, return_tensors="pt").input_features
        self.assertEqual(input_features.shape, (1, 584, 24))
        self.assertTrue(np.allclose(input_features[0, 0, :30], expected, atol=1e-4))
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from accelerate import PartialState
from accelerate.utils.operations import broadcast, gather, gather_object, pad_across_processes, reduce
def create_tensor(state):
    return (torch.arange(state.num_processes) + 1.0 + (state.num_processes * state.process_index)).to(state.device)
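
# With n = num_processes, process i holds the values [i*n + 1, ..., i*n + n],
# so gathering across all processes yields exactly [1, ..., n**2] in rank order.
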
def test_gather(state):
    tensor = create_tensor(state)
    gathered_tensor = gather(tensor)
    assert gathered_tensor.tolist() == list(range(1, state.num_processes**2 + 1))


def test_gather_object(state):
    obj = [state.process_index]
    gathered_obj = gather_object(obj)
    assert len(gathered_obj) == state.num_processes, f"{gathered_obj}, {len(gathered_obj)} != {state.num_processes}"
    assert gathered_obj == list(range(state.num_processes)), f"{gathered_obj} != {list(range(state.num_processes))}"


def test_broadcast(state):
    tensor = create_tensor(state)
    broadcasted_tensor = broadcast(tensor)
    assert broadcasted_tensor.shape == torch.Size([state.num_processes])
    assert broadcasted_tensor.tolist() == list(range(1, state.num_processes + 1))


def test_pad_across_processes(state):
    # The main process holds one extra element, so the others must be padded to match.
    if state.is_main_process:
        tensor = torch.arange(state.num_processes + 1).to(state.device)
    else:
        tensor = torch.arange(state.num_processes).to(state.device)
    padded_tensor = pad_across_processes(tensor)
    assert padded_tensor.shape == torch.Size([state.num_processes + 1])
    if not state.is_main_process:
        assert padded_tensor.tolist() == list(range(0, state.num_processes)) + [0]


def test_reduce_sum(state):
    # For now runs on only two processes
    if state.num_processes != 2:
        return
    tensor = create_tensor(state)
    reduced_tensor = reduce(tensor, "sum")
    truth_tensor = torch.tensor([4.0, 6]).to(state.device)
    assert torch.allclose(reduced_tensor, truth_tensor), f"{reduced_tensor} != {truth_tensor}"


def test_reduce_mean(state):
    # For now runs on only two processes
    if state.num_processes != 2:
        return
    tensor = create_tensor(state)
    reduced_tensor = reduce(tensor, "mean")
    truth_tensor = torch.tensor([2.0, 3]).to(state.device)
    assert torch.allclose(reduced_tensor, truth_tensor), f"{reduced_tensor} != {truth_tensor}"


def _mp_fn(index):
    # For xla_spawn (TPUs)
    main()
def main():
    state = PartialState()
    state.print(f"State: {state}")
    state.print("testing gather")
    test_gather(state)
    state.print("testing gather_object")
    test_gather_object(state)
    state.print("testing broadcast")
    test_broadcast(state)
    state.print("testing pad_across_processes")
    test_pad_across_processes(state)
    state.print("testing reduce_sum")
    test_reduce_sum(state)
    state.print("testing reduce_mean")
    test_reduce_mean(state)


if __name__ == "__main__":
    main()
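
# These collective-operation checks only do real work across several processes.
# A typical invocation (assuming the `accelerate` CLI is installed):
#     accelerate launch --num_processes 2 path/to/this_test_script.py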
import json
import os
from datetime import date
from pathlib import Path
from tabulate import DataRow, TableFormat, tabulate
hf_table_format = TableFormat(
    lineabove=None,
    linebelowheader=None,
    linebetweenrows=None,
    linebelow=None,
    headerrow=DataRow("", "|", "|"),
    datarow=DataRow("", "|", "|"),
    padding=1,
    with_header_hide=None,
)
failed = []
group_info = []
no_error_payload = {"type": "section", "text": {"type": "plain_text", "text": "No failed tests! 🤗", "emoji": True}}
payload = [
    {
        "type": "header",
        "text": {
            "type": "plain_text",
            "text": f"🤗 Accelerate nightly {os.environ.get('TEST_TYPE', '')} test results",
            "emoji": True,
        },
    }
]
total_num_failed = 0
for log in Path().glob("*.log"):
    section_num_failed = 0
    with open(log, "r") as f:
        for line in f:
            line = json.loads(line)
            if line.get("nodeid", "") != "":
                test = line["nodeid"]
                if line.get("duration", None) is not None:
                    duration = f'{line["duration"]:.4f}'
                    if line.get("outcome", "") == "failed":
                        section_num_failed += 1
                        failed.append([test, duration, log.name.split("_")[0]])
                        total_num_failed += 1
    group_info.append([str(log), section_num_failed, failed])
    failed = []
    log.unlink()
lowerCAmelCase = """"""
lowerCAmelCase = []
if total_num_failed > 0:
for name, num_failed, failed_tests in group_info:
if num_failed > 0:
if num_failed == 1:
message += F"*{name[1:]}: {num_failed} failed test*\n"
else:
message += F"*{name[1:]}: {num_failed} failed tests*\n"
lowerCAmelCase = []
lowerCAmelCase = {}
for test in failed_tests:
lowerCAmelCase = test[0].split('''::''')
lowerCAmelCase = data[0].split('''/''')[-1]
if data[0] not in filesafailed:
lowerCAmelCase = [data[1:]]
else:
filesafailed[data[0]] += [data[1:]]
failed_table.append(data)
lowerCAmelCase = [test[0] for test in failed_table]
lowerCAmelCase = list(set(files))
# Count number of instances in failed_tests
lowerCAmelCase = []
for file in individual_files:
table.append([file, len(filesafailed[file])])
lowerCAmelCase = tabulate(
table,
headers=['''Test Location''', '''Num Failed'''],
tablefmt=hf_table_format,
stralign='''right''',
)
message += F"\n```\n{failed_table}\n```"
all_filesafailed.append(filesafailed)
if len(message) > 3_0_0_0:
lowerCAmelCase = """Too many failed tests, please see the full report in the Action results."""
lowerCAmelCase = len(err) + 1_0
lowerCAmelCase = message[: 3_0_0_0 - offset] + F'\n...\n```\n{err}'
print(F'### {message}')
else:
lowerCAmelCase = """No failed tests! 🤗"""
print(F'## {message}')
payload.append(no_error_payload)
if os.environ.get("TEST_TYPE", "") != "":
    from slack_sdk import WebClient

    client = WebClient(token=os.environ["SLACK_API_TOKEN"])
    if message != "No failed tests! 🤗":
        md_report = {
            "type": "section",
            "text": {
                "type": "mrkdwn",
                "text": message,
            },
        }
        payload.append(md_report)
        action_button = {
            "type": "section",
            "text": {
                "type": "mrkdwn",
                "text": "*For more details:*",
            },
            "accessory": {
                "type": "button",
                "text": {
                    "type": "plain_text",
                    "text": "Check Action results",
                    "emoji": True,
                },
                "url": f"https://github.com/{os.environ['GITHUB_REPOSITORY']}/actions/runs/{os.environ['GITHUB_RUN_ID']}",
            },
        }
        payload.append(action_button)
    date_report = {
        "type": "context",
        "elements": [
            {
                "type": "plain_text",
                "text": f"Nightly {os.environ.get('TEST_TYPE')} test results for {date.today()}",
            }
        ],
    }
    payload.append(date_report)
    response = client.chat_postMessage(channel="#accelerate-ci-daily", text=message, blocks=payload)
    ts = response.data["ts"]
    for failed_file in all_filesafailed:
        for test_location, test_failures in failed_file.items():
            # Keep only the first instance of the test name
            test_class = ""
            for i, row in enumerate(test_failures):
                if row[0] != test_class:
                    test_class = row[0]
                else:
                    row[0] = ""
            payload = {
                "type": "section",
                "text": {
                    "type": "mrkdwn",
                    "text": f"Test location: {test_location}\n```\n{tabulate(test_failures, headers=['Class', 'Test'], tablefmt=hf_table_format, stralign='right')}\n```",
                },
            }
            client.chat_postMessage(
                channel="#accelerate-ci-daily",
                thread_ts=ts,
                blocks=[payload],
            )
# DISCLAIMER: This file is strongly influenced by https://github.com/yang-song/score_sde_pytorch
import math
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, randn_tensor
from .scheduling_utils import SchedulerMixin, SchedulerOutput
@dataclass
class SdeVeOutput(BaseOutput):
    """Output class for the ScoreSdeVeScheduler's step functions."""

    prev_sample: torch.FloatTensor
    prev_sample_mean: torch.FloatTensor


class ScoreSdeVeScheduler(SchedulerMixin, ConfigMixin):
    order = 1

    @register_to_config
    def __init__(self, num_train_timesteps=2000, snr=0.15, sigma_min=0.01, sigma_max=1348.0,
                 sampling_eps=1e-5, correct_steps=1):
        # standard deviation of the initial noise distribution
        self.init_noise_sigma = sigma_max
        # setable values
        self.timesteps = None
        self.set_sigmas(num_train_timesteps, sigma_min, sigma_max, sampling_eps)
    def scale_model_input(self, sample, timestep=None):
        return sample

    def set_timesteps(self, num_inference_steps, sampling_eps=None, device=None):
        sampling_eps = sampling_eps if sampling_eps is not None else self.config.sampling_eps
        self.timesteps = torch.linspace(1, sampling_eps, num_inference_steps, device=device)
    def set_sigmas(self, num_inference_steps, sigma_min=None, sigma_max=None, sampling_eps=None):
        sigma_min = sigma_min if sigma_min is not None else self.config.sigma_min
        sigma_max = sigma_max if sigma_max is not None else self.config.sigma_max
        sampling_eps = sampling_eps if sampling_eps is not None else self.config.sampling_eps
        if self.timesteps is None:
            self.set_timesteps(num_inference_steps, sampling_eps)
        self.sigmas = sigma_min * (sigma_max / sigma_min) ** (self.timesteps / sampling_eps)
        self.discrete_sigmas = torch.exp(torch.linspace(math.log(sigma_min), math.log(sigma_max), num_inference_steps))
        self.sigmas = torch.tensor([sigma_min * (sigma_max / sigma_min) ** t for t in self.timesteps])
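    # The sigma schedule above interpolates geometrically between sigma_min and
    # sigma_max: sigma(t) = sigma_min * (sigma_max / sigma_min) ** t for t in [0, 1],
    # i.e. log(sigma) is linear in t, matching the log-spaced discrete_sigmas grid.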
    def get_adjacent_sigma(self, timesteps, t):
        return torch.where(
            timesteps == 0,
            torch.zeros_like(t.to(timesteps.device)),
            self.discrete_sigmas[timesteps - 1].to(timesteps.device),
        )
    def step_pred(self, model_output, timestep, sample, generator=None, return_dict=True):
        if self.timesteps is None:
            raise ValueError(
                "`self.timesteps` is not set, you need to run 'set_timesteps' after creating the scheduler"
            )
        timestep = timestep * torch.ones(
            sample.shape[0], device=sample.device
        )  # torch.repeat_interleave(timestep, sample.shape[0])
        timesteps = (timestep * (len(self.timesteps) - 1)).long()
        # mps requires indices to be in the same device, so we use cpu as is the default with cuda
        timesteps = timesteps.to(self.discrete_sigmas.device)
        sigma = self.discrete_sigmas[timesteps].to(sample.device)
        adjacent_sigma = self.get_adjacent_sigma(timesteps, timestep).to(sample.device)
        drift = torch.zeros_like(sample)
        diffusion = (sigma**2 - adjacent_sigma**2) ** 0.5
        # equation 6 in the paper: the model_output modeled by the network is grad_x log pt(x)
        # also equation 47 shows the analog from SDE models to ancestral sampling methods
        diffusion = diffusion.flatten()
        while len(diffusion.shape) < len(sample.shape):
            diffusion = diffusion.unsqueeze(-1)
        drift = drift - diffusion**2 * model_output
        # equation 6: sample noise for the diffusion term of
        noise = randn_tensor(
            sample.shape, layout=sample.layout, generator=generator, device=sample.device, dtype=sample.dtype
        )
        prev_sample_mean = sample - drift  # subtract because `dt` is a small negative timestep
        # TODO is the variable diffusion the correct scaling term for the noise?
        prev_sample = prev_sample_mean + diffusion * noise  # add impact of diffusion field g
        if not return_dict:
            return (prev_sample, prev_sample_mean)
        return SdeVeOutput(prev_sample=prev_sample, prev_sample_mean=prev_sample_mean)
    def step_correct(self, model_output, sample, generator=None, return_dict=True):
        if self.timesteps is None:
            raise ValueError(
                "`self.timesteps` is not set, you need to run 'set_timesteps' after creating the scheduler"
            )
        # For small batch sizes, the paper "suggest replacing norm(z) with sqrt(d), where d is the dim. of z"
        # sample noise for correction
        noise = randn_tensor(sample.shape, layout=sample.layout, generator=generator).to(sample.device)
        # compute step size from the model_output, the noise, and the snr
        grad_norm = torch.norm(model_output.reshape(model_output.shape[0], -1), dim=-1).mean()
        noise_norm = torch.norm(noise.reshape(noise.shape[0], -1), dim=-1).mean()
        step_size = (self.config.snr * noise_norm / grad_norm) ** 2 * 2
        step_size = step_size * torch.ones(sample.shape[0]).to(sample.device)
        # self.repeat_scalar(step_size, sample.shape[0])
        # compute corrected sample: model_output term and noise term
        step_size = step_size.flatten()
        while len(step_size.shape) < len(sample.shape):
            step_size = step_size.unsqueeze(-1)
        prev_sample_mean = sample + step_size * model_output
        prev_sample = prev_sample_mean + ((step_size * 2) ** 0.5) * noise
        if not return_dict:
            return (prev_sample,)
        return SchedulerOutput(prev_sample=prev_sample)
    def add_noise(self, original_samples, noise, timesteps):
        # Make sure sigmas and timesteps have the same device and dtype as original_samples
        timesteps = timesteps.to(original_samples.device)
        sigmas = self.discrete_sigmas.to(original_samples.device)[timesteps]
        noise = (
            noise * sigmas[:, None, None, None]
            if noise is not None
            else torch.randn_like(original_samples) * sigmas[:, None, None, None]
        )
        noisy_samples = noise + original_samples
        return noisy_samples
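    # Note: this variance-exploding formulation perturbs data additively,
    # x_t = x_0 + sigma_t * noise, with no rescaling of the original sample
    # (unlike variance-preserving schedulers, which also scale x_0).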
    def __len__(self):
        return self.config.num_train_timesteps
from __future__ import annotations


def all_construct(target: str, word_bank: list[str] | None = None) -> list[list[str]]:
    """Returns every way `target` can be built by concatenating words from `word_bank`."""
    word_bank = word_bank or []
    # create a table
    table_size = len(target) + 1
    table: list[list[list[str]]] = []
    for _ in range(table_size):
        table.append([])
    # seed value
    table[0] = [[]]  # because empty string has empty combination
    # iterate through the indices
    for i in range(table_size):
        # condition
        if table[i] != []:
            for word in word_bank:
                # slice condition
                if target[i : i + len(word)] == word:
                    new_combinations = [[word, *way] for way in table[i]]
                    # adds the word to every combination the current position holds
                    # now, push that combination to the table[i+len(word)]
                    table[i + len(word)] += new_combinations
    # combinations are in reverse order so reverse for better output
    for combination in table[len(target)]:
        combination.reverse()
    return table[len(target)]
if __name__ == "__main__":
print(all_construct('''jwajalapa''', ['''jwa''', '''j''', '''w''', '''a''', '''la''', '''lapa''']))
print(all_construct('''rajamati''', ['''s''', '''raj''', '''amat''', '''raja''', '''ma''', '''i''', '''t''']))
print(
all_construct(
'''hexagonosaurus''',
['''h''', '''ex''', '''hex''', '''ag''', '''ago''', '''ru''', '''auru''', '''rus''', '''go''', '''no''', '''o''', '''s'''],
)
)
import gc
import tempfile
import unittest
import numpy as np
import torch
from diffusers import VersatileDiffusionPipeline
from diffusers.utils.testing_utils import load_image, nightly, require_torch_gpu, torch_device
lowerCAmelCase = False
class VersatileDiffusionMegaPipelineFastTests(unittest.TestCase):
    pass


@nightly
@require_torch_gpu
class VersatileDiffusionMegaPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_from_save_pretrained(self):
        pipe = VersatileDiffusionPipeline.from_pretrained("shi-labs/versatile-diffusion", torch_dtype=torch.float16)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        prompt_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg"
        )

        generator = torch.manual_seed(0)
        image = pipe.dual_guided(
            prompt="first prompt",
            image=prompt_image,
            text_to_image_strength=0.75,
            generator=generator,
            guidance_scale=7.5,
            num_inference_steps=2,
            output_type="numpy",
        ).images

        with tempfile.TemporaryDirectory() as tmpdirname:
            pipe.save_pretrained(tmpdirname)
            pipe = VersatileDiffusionPipeline.from_pretrained(tmpdirname, torch_dtype=torch.float16)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        generator = generator.manual_seed(0)
        new_image = pipe.dual_guided(
            prompt="first prompt",
            image=prompt_image,
            text_to_image_strength=0.75,
            generator=generator,
            guidance_scale=7.5,
            num_inference_steps=2,
            output_type="numpy",
        ).images

        assert np.abs(image - new_image).sum() < 1e-5, "Models don't have the same forward pass"

    def test_inference_dual_guided_then_text_to_image(self):
        pipe = VersatileDiffusionPipeline.from_pretrained("shi-labs/versatile-diffusion", torch_dtype=torch.float16)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        prompt = "cyberpunk 2077"
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg"
        )
        generator = torch.manual_seed(0)
        image = pipe.dual_guided(
            prompt=prompt,
            image=init_image,
            text_to_image_strength=0.75,
            generator=generator,
            guidance_scale=7.5,
            num_inference_steps=50,
            output_type="numpy",
        ).images

        image_slice = image[0, 253:256, 253:256, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.1448, 0.1619, 0.1741, 0.1086, 0.1147, 0.1128, 0.1199, 0.1165, 0.1001])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1

        prompt = "A painting of a squirrel eating a burger "
        generator = torch.manual_seed(0)
        image = pipe.text_to_image(
            prompt=prompt, generator=generator, guidance_scale=7.5, num_inference_steps=50, output_type="numpy"
        ).images
        image_slice = image[0, 253:256, 253:256, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.3367, 0.3169, 0.2656, 0.3870, 0.4790, 0.3796, 0.4009, 0.4878, 0.4778])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1

        image = pipe.image_variation(init_image, generator=generator, output_type="numpy").images
        image_slice = image[0, 253:256, 253:256, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.3076, 0.3123, 0.3284, 0.3782, 0.3770, 0.3894, 0.4297, 0.4331, 0.4456])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
logger = logging.get_logger(__name__)

NAT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "shi-labs/nat-mini-in1k-224": "https://huggingface.co/shi-labs/nat-mini-in1k-224/resolve/main/config.json",
    # See all Nat models at https://huggingface.co/models?filter=nat
}


class NatConfig(BackboneConfigMixin, PretrainedConfig):
    model_type = "nat"

    attribute_map = {
        "num_attention_heads": "num_heads",
        "num_hidden_layers": "num_layers",
    }

    def __init__(
        self,
        patch_size=4,
        num_channels=3,
        embed_dim=64,
        depths=[3, 4, 6, 5],
        num_heads=[2, 4, 8, 16],
        kernel_size=7,
        mlp_ratio=3.0,
        qkv_bias=True,
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        drop_path_rate=0.1,
        hidden_act="gelu",
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        layer_scale_init_value=0.0,
        out_features=None,
        out_indices=None,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_layers = len(depths)
        self.num_heads = num_heads
        self.kernel_size = kernel_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        # we set the hidden_size attribute in order to make Nat work with VisionEncoderDecoderModel
        # this indicates the channel dimension after the last stage of the model
        self.hidden_size = int(embed_dim * 2 ** (len(depths) - 1))
        self.layer_scale_init_value = layer_scale_init_value
        self.stage_names = ["stem"] + [f"stage{idx}" for idx in range(1, len(depths) + 1)]
        self._out_features, self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features, out_indices=out_indices, stage_names=self.stage_names
        )
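

# Illustrative usage (not part of the original file): building a randomly
# initialized Nat backbone from this config. `NatModel` and the import path
# come from transformers and should be treated as assumptions of this sketch.
#
#     from transformers import NatConfig, NatModel
#
#     config = NatConfig(embed_dim=64, depths=[3, 4, 6, 5], num_heads=[2, 4, 8, 16])
#     model = NatModel(config)  # no pretrained weights are loaded
#     print(config.hidden_size)  # 512 = 64 * 2 ** 3, channel dim after the last stage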
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
    "configuration_xmod": [
        "XMOD_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "XmodConfig",
        "XmodOnnxConfig",
    ],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_xmod"] = [
        "XMOD_PRETRAINED_MODEL_ARCHIVE_LIST",
        "XmodForCausalLM",
        "XmodForMaskedLM",
        "XmodForMultipleChoice",
        "XmodForQuestionAnswering",
        "XmodForSequenceClassification",
        "XmodForTokenClassification",
        "XmodModel",
        "XmodPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_xmod import XMOD_PRETRAINED_CONFIG_ARCHIVE_MAP, XmodConfig, XmodOnnxConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_xmod import (
            XMOD_PRETRAINED_MODEL_ARCHIVE_LIST,
            XmodForCausalLM,
            XmodForMaskedLM,
            XmodForMultipleChoice,
            XmodForQuestionAnswering,
            XmodForSequenceClassification,
            XmodForTokenClassification,
            XmodModel,
            XmodPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
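

# How the lazy module above behaves at runtime (illustrative sketch, not part
# of the original file): nothing under `modeling_xmod` is imported until one
# of its exported names is first accessed.
#
#     from transformers import XmodConfig   # resolves through _LazyModule
#
#     config = XmodConfig()                 # only configuration_xmod is loaded
#     from transformers import XmodModel    # now modeling_xmod gets imported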
"""simple docstring"""
import collections
from typing import List, Optional, Union
from ...tokenization_utils_base import BatchEncoding
from ...utils import TensorType, add_end_docstrings, add_start_docstrings, logging
from ..bert.tokenization_bert import BertTokenizer
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}
CONTEXT_ENCODER_PRETRAINED_VOCAB_FILES_MAP = {
'''vocab_file''': {
'''facebook/dpr-ctx_encoder-single-nq-base''': (
'''https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/vocab.txt'''
),
'''facebook/dpr-ctx_encoder-multiset-base''': (
'''https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/vocab.txt'''
),
},
'''tokenizer_file''': {
'''facebook/dpr-ctx_encoder-single-nq-base''': (
'''https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/tokenizer.json'''
),
'''facebook/dpr-ctx_encoder-multiset-base''': (
'''https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/tokenizer.json'''
),
},
}
QUESTION_ENCODER_PRETRAINED_VOCAB_FILES_MAP = {
'''vocab_file''': {
'''facebook/dpr-question_encoder-single-nq-base''': (
'''https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/vocab.txt'''
),
'''facebook/dpr-question_encoder-multiset-base''': (
'''https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/vocab.txt'''
),
},
'''tokenizer_file''': {
'''facebook/dpr-question_encoder-single-nq-base''': (
'''https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/tokenizer.json'''
),
'''facebook/dpr-question_encoder-multiset-base''': (
'''https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/tokenizer.json'''
),
},
}
READER_PRETRAINED_VOCAB_FILES_MAP = {
'''vocab_file''': {
'''facebook/dpr-reader-single-nq-base''': (
'''https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/vocab.txt'''
),
'''facebook/dpr-reader-multiset-base''': (
'''https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/vocab.txt'''
),
},
'''tokenizer_file''': {
'''facebook/dpr-reader-single-nq-base''': (
'''https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/tokenizer.json'''
),
'''facebook/dpr-reader-multiset-base''': (
'''https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/tokenizer.json'''
),
},
}
CONTEXT_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "facebook/dpr-ctx_encoder-single-nq-base": 512,
    "facebook/dpr-ctx_encoder-multiset-base": 512,
}
QUESTION_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "facebook/dpr-question_encoder-single-nq-base": 512,
    "facebook/dpr-question_encoder-multiset-base": 512,
}
READER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "facebook/dpr-reader-single-nq-base": 512,
    "facebook/dpr-reader-multiset-base": 512,
}
CONTEXT_ENCODER_PRETRAINED_INIT_CONFIGURATION = {
    "facebook/dpr-ctx_encoder-single-nq-base": {"do_lower_case": True},
    "facebook/dpr-ctx_encoder-multiset-base": {"do_lower_case": True},
}
QUESTION_ENCODER_PRETRAINED_INIT_CONFIGURATION = {
    "facebook/dpr-question_encoder-single-nq-base": {"do_lower_case": True},
    "facebook/dpr-question_encoder-multiset-base": {"do_lower_case": True},
}
READER_PRETRAINED_INIT_CONFIGURATION = {
    "facebook/dpr-reader-single-nq-base": {"do_lower_case": True},
    "facebook/dpr-reader-multiset-base": {"do_lower_case": True},
}
class DPRContextEncoderTokenizer(BertTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = CONTEXT_ENCODER_PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = CONTEXT_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = CONTEXT_ENCODER_PRETRAINED_INIT_CONFIGURATION
class DPRQuestionEncoderTokenizer(BertTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = QUESTION_ENCODER_PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = QUESTION_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = QUESTION_ENCODER_PRETRAINED_INIT_CONFIGURATION
DPRSpanPrediction = collections.namedtuple(
    "DPRSpanPrediction", ["span_score", "relevance_score", "doc_id", "start_index", "end_index", "text"]
)

DPRReaderOutput = collections.namedtuple("DPRReaderOutput", ["start_logits", "end_logits", "relevance_logits"])


CUSTOM_DPR_READER_DOCSTRING = r'''
Return a dictionary with the token ids of the input strings and other information to give to `.decode_best_spans`.
It converts the strings of a question and different passages (title and text) in a sequence of IDs (integers),
using the tokenizer and vocabulary. The resulting `input_ids` is a matrix of size `(n_passages, sequence_length)`
with the format:
```
[CLS] <question token ids> [SEP] <titles ids> [SEP] <texts ids>
```
Args:
questions (`str` or `List[str]`):
The questions to be encoded. You can specify one question for many passages. In this case, the question
will be duplicated like `[questions] * n_passages`. Otherwise you have to specify as many questions as in
`titles` or `texts`.
titles (`str` or `List[str]`):
The passages titles to be encoded. This can be a string or a list of strings if there are several passages.
texts (`str` or `List[str]`):
The passages texts to be encoded. This can be a string or a list of strings if there are several passages.
padding (`bool`, `str` or [`~utils.PaddingStrategy`], *optional*, defaults to `False`):
Activates and controls padding. Accepts the following values:
- `True` or `\'longest\'`: Pad to the longest sequence in the batch (or no padding if only a single sequence
if provided).
- `\'max_length\'`: Pad to a maximum length specified with the argument `max_length` or to the maximum
acceptable input length for the model if that argument is not provided.
- `False` or `\'do_not_pad\'` (default): No padding (i.e., can output a batch with sequences of different
lengths).
truncation (`bool`, `str` or [`~tokenization_utils_base.TruncationStrategy`], *optional*, defaults to `False`):
Activates and controls truncation. Accepts the following values:
- `True` or `\'longest_first\'`: Truncate to a maximum length specified with the argument `max_length` or to
the maximum acceptable input length for the model if that argument is not provided. This will truncate
token by token, removing a token from the longest sequence in the pair if a pair of sequences (or a batch
of pairs) is provided.
- `\'only_first\'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum
acceptable input length for the model if that argument is not provided. This will only truncate the first
sequence of a pair if a pair of sequences (or a batch of pairs) is provided.
- `\'only_second\'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum
acceptable input length for the model if that argument is not provided. This will only truncate the
second sequence of a pair if a pair of sequences (or a batch of pairs) is provided.
- `False` or `\'do_not_truncate\'` (default): No truncation (i.e., can output batch with sequence lengths
greater than the model maximum admissible input size).
max_length (`int`, *optional*):
Controls the maximum length to use by one of the truncation/padding parameters.
If left unset or set to `None`, this will use the predefined model maximum length if a maximum length
is required by one of the truncation/padding parameters. If the model has no specific maximum input
length (like XLNet) truncation/padding to a maximum length will be deactivated.
return_tensors (`str` or [`~utils.TensorType`], *optional*):
If set, will return tensors instead of list of python integers. Acceptable values are:
- `\'tf\'`: Return TensorFlow `tf.constant` objects.
- `\'pt\'`: Return PyTorch `torch.Tensor` objects.
- `\'np\'`: Return Numpy `np.ndarray` objects.
return_attention_mask (`bool`, *optional*):
Whether or not to return the attention mask. If not set, will return the attention mask according to the
specific tokenizer\'s default, defined by the `return_outputs` attribute.
[What are attention masks?](../glossary#attention-mask)
Returns:
`Dict[str, List[List[int]]]`: A dictionary with the following keys:
- `input_ids`: List of token ids to be fed to a model.
- `attention_mask`: List of indices specifying which tokens should be attended to by the model.
'''
@add_start_docstrings(CUSTOM_DPR_READER_DOCSTRING)
class CustomDPRReaderTokenizerMixin:
    def __call__(
        self,
        questions,
        titles: Optional[str] = None,
        texts: Optional[str] = None,
        padding: Union[bool, str] = False,
        truncation: Union[bool, str] = False,
        max_length: Optional[int] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        return_attention_mask: Optional[bool] = None,
        **kwargs,
    ) -> BatchEncoding:
        if titles is None and texts is None:
            return super().__call__(
                questions,
                padding=padding,
                truncation=truncation,
                max_length=max_length,
                return_tensors=return_tensors,
                return_attention_mask=return_attention_mask,
                **kwargs,
            )
        elif titles is None or texts is None:
            text_pair = titles if texts is None else texts
            return super().__call__(
                questions,
                text_pair,
                padding=padding,
                truncation=truncation,
                max_length=max_length,
                return_tensors=return_tensors,
                return_attention_mask=return_attention_mask,
                **kwargs,
            )
        titles = titles if not isinstance(titles, str) else [titles]
        texts = texts if not isinstance(texts, str) else [texts]
        n_passages = len(titles)
        questions = questions if not isinstance(questions, str) else [questions] * n_passages
        if len(titles) != len(texts):
            raise ValueError(
                f"There should be as many titles as texts but got {len(titles)} titles and {len(texts)} texts."
            )
        encoded_question_and_titles = super().__call__(questions, titles, padding=False, truncation=False)["input_ids"]
        encoded_texts = super().__call__(texts, add_special_tokens=False, padding=False, truncation=False)["input_ids"]
        encoded_inputs = {
            "input_ids": [
                (encoded_question_and_title + encoded_text)[:max_length]
                if max_length is not None and truncation
                else encoded_question_and_title + encoded_text
                for encoded_question_and_title, encoded_text in zip(encoded_question_and_titles, encoded_texts)
            ]
        }
        if return_attention_mask is not False:
            attention_mask = []
            for input_ids in encoded_inputs["input_ids"]:
                attention_mask.append([int(input_id != self.pad_token_id) for input_id in input_ids])
            encoded_inputs["attention_mask"] = attention_mask
        return self.pad(encoded_inputs, padding=padding, max_length=max_length, return_tensors=return_tensors)

    def decode_best_spans(
        self,
        reader_input: BatchEncoding,
        reader_output: DPRReaderOutput,
        num_spans: int = 16,
        max_answer_length: int = 64,
        num_spans_per_passage: int = 4,
    ) -> List[DPRSpanPrediction]:
        input_ids = reader_input["input_ids"]
        start_logits, end_logits, relevance_logits = reader_output[:3]
        n_passages = len(relevance_logits)
        sorted_docs = sorted(range(n_passages), reverse=True, key=relevance_logits.__getitem__)
        nbest_spans_predictions: List[DPRSpanPrediction] = []
        for doc_id in sorted_docs:
            sequence_ids = list(input_ids[doc_id])
            # assuming question & title information is at the beginning of the sequence
            passage_offset = sequence_ids.index(self.sep_token_id, 2) + 1  # second sep id
            if sequence_ids[-1] == self.pad_token_id:
                sequence_len = sequence_ids.index(self.pad_token_id)
            else:
                sequence_len = len(sequence_ids)

            best_spans = self._get_best_spans(
                start_logits=start_logits[doc_id][passage_offset:sequence_len],
                end_logits=end_logits[doc_id][passage_offset:sequence_len],
                max_answer_length=max_answer_length,
                top_spans=num_spans_per_passage,
            )
            for start_index, end_index in best_spans:
                start_index += passage_offset
                end_index += passage_offset
                nbest_spans_predictions.append(
                    DPRSpanPrediction(
                        span_score=start_logits[doc_id][start_index] + end_logits[doc_id][end_index],
                        relevance_score=relevance_logits[doc_id],
                        doc_id=doc_id,
                        start_index=start_index,
                        end_index=end_index,
                        text=self.decode(sequence_ids[start_index : end_index + 1]),
                    )
                )
            if len(nbest_spans_predictions) >= num_spans:
                break
        return nbest_spans_predictions[:num_spans]

    def _get_best_spans(
        self,
        start_logits,
        end_logits,
        max_answer_length: int,
        top_spans: int,
    ):
        scores = []
        for start_index, start_score in enumerate(start_logits):
            for answer_length, end_score in enumerate(end_logits[start_index : start_index + max_answer_length]):
                scores.append(((start_index, start_index + answer_length), start_score + end_score))
        scores = sorted(scores, key=lambda x: x[1], reverse=True)
        chosen_span_intervals = []
        for (start_index, end_index), score in scores:
            if start_index > end_index:
                raise ValueError(f"Wrong span indices: [{start_index}:{end_index}]")
            length = end_index - start_index + 1
            if length > max_answer_length:
                raise ValueError(f"Span is too long: {length} > {max_answer_length}")
            if any(
                start_index <= prev_start_index <= prev_end_index <= end_index
                or prev_start_index <= start_index <= end_index <= prev_end_index
                for (prev_start_index, prev_end_index) in chosen_span_intervals
            ):
                continue
            chosen_span_intervals.append((start_index, end_index))

            if len(chosen_span_intervals) == top_spans:
                break
        return chosen_span_intervals


@add_end_docstrings(CUSTOM_DPR_READER_DOCSTRING)
class DPRReaderTokenizer(CustomDPRReaderTokenizerMixin, BertTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = READER_PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = READER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = READER_PRETRAINED_INIT_CONFIGURATION
    model_input_names = ["input_ids", "attention_mask"]
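

# Illustrative usage of the reader tokenizer defined above (not part of the
# original file); the model class `DPRReader` and the checkpoint name come
# from transformers, and the question/passage strings are made up here.
#
#     from transformers import DPRReader, DPRReaderTokenizer
#
#     tokenizer = DPRReaderTokenizer.from_pretrained("facebook/dpr-reader-single-nq-base")
#     model = DPRReader.from_pretrained("facebook/dpr-reader-single-nq-base")
#     encoded_inputs = tokenizer(
#         questions=["What is love?"],
#         titles=["Haddaway"],
#         texts=["'What Is Love' is a song recorded by the artist Haddaway"],
#         return_tensors="pt",
#     )
#     outputs = model(**encoded_inputs)
#     predicted_spans = tokenizer.decode_best_spans(encoded_inputs, outputs)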
import math
from datetime import datetime, timedelta
def gauss_easter(year: int) -> datetime:
    """
    Calculate the Gregorian Easter date for a given year.
    """
    metonic_cycle = year % 19
    julian_leap_year = year % 4
    non_leap_year = year % 7
    leap_day_inhibits = math.floor(year / 100)
    lunar_orbit_correction = math.floor((13 + 8 * leap_day_inhibits) / 25)
    leap_day_reinstall_number = leap_day_inhibits / 4
    secular_moon_shift = (
        15 - lunar_orbit_correction + leap_day_inhibits - leap_day_reinstall_number
    ) % 30
    century_starting_point = (4 + leap_day_inhibits - leap_day_reinstall_number) % 7

    # days to be added to March 21
    days_to_add = (19 * metonic_cycle + secular_moon_shift) % 30

    # PHM -> Paschal Full Moon
    days_from_phm_to_sunday = (
        2 * julian_leap_year
        + 4 * non_leap_year
        + 6 * days_to_add
        + century_starting_point
    ) % 7

    if days_to_add == 29 and days_from_phm_to_sunday == 6:
        return datetime(year, 4, 19)
    elif days_to_add == 28 and days_from_phm_to_sunday == 6:
        return datetime(year, 4, 18)
    else:
        return datetime(year, 3, 22) + timedelta(
            days=int(days_to_add + days_from_phm_to_sunday)
        )


if __name__ == "__main__":
    for year in (1994, 2000, 2010, 2021, 2023):
        tense = "will be" if year > datetime.now().year else "was"
        print(f"Easter in {year} {tense} {gauss_easter(year)}")
import warnings
from diffusers import StableDiffusionInpaintPipeline as StableDiffusionInpaintPipeline # noqa F401
warnings.warn(
'''The `inpainting.py` script is outdated. Please use directly `from diffusers import'''
''' StableDiffusionInpaintPipeline` instead.'''
)
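

# As the deprecation warning above says, the supported path is the library
# import (illustrative sketch only; the checkpoint name is an assumption,
# not part of this file):
#
#     from diffusers import StableDiffusionInpaintPipeline
#
#     pipe = StableDiffusionInpaintPipeline.from_pretrained("runwayml/stable-diffusion-inpainting")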
from collections import OrderedDict
from typing import Any, Mapping, Optional
from ... import PreTrainedTokenizer
from ...configuration_utils import PretrainedConfig
from ...file_utils import TensorType, is_torch_available
from ...onnx import OnnxConfig, OnnxConfigWithPast, OnnxSeq2SeqConfigWithPast
from ...onnx.utils import compute_effective_axis_dimension
from ...utils import logging
logger = logging.get_logger(__name__)

BLENDERBOT_SMALL_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/blenderbot_small-90M": "https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/config.json",
    # See all BlenderbotSmall models at https://huggingface.co/models?filter=blenderbot_small
}


class BlenderbotSmallConfig(PretrainedConfig):
    model_type = "blenderbot-small"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}

    def __init__(
        self, vocab_size=50265, max_position_embeddings=512, encoder_layers=8, encoder_ffn_dim=2048,
        encoder_attention_heads=16, decoder_layers=8, decoder_ffn_dim=2048, decoder_attention_heads=16,
        encoder_layerdrop=0.0, decoder_layerdrop=0.0, use_cache=True, is_encoder_decoder=True,
        activation_function="gelu", d_model=512, dropout=0.1, attention_dropout=0.0, activation_dropout=0.0,
        init_std=0.02, decoder_start_token_id=1, scale_embedding=False, pad_token_id=0, bos_token_id=1,
        eos_token_id=2, forced_eos_token_id=2, **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.num_hidden_layers = encoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True

        super().__init__(
            pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id,
            is_encoder_decoder=is_encoder_decoder, decoder_start_token_id=decoder_start_token_id,
            forced_eos_token_id=forced_eos_token_id, **kwargs,
        )
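

# Illustrative only (not part of the original file): the defaults above
# mirror facebook/blenderbot_small-90M, so a bare config matches it.
#
#     config = BlenderbotSmallConfig()
#     assert config.d_model == 512 and config.encoder_layers == 8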
class BlenderbotSmallOnnxConfig(OnnxSeq2SeqConfigWithPast):
@property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
if self.task in ["default", "seq2seq-lm"]:
__lowercase= OrderedDict(
[
('input_ids', {0: 'batch', 1: 'encoder_sequence'}),
('attention_mask', {0: 'batch', 1: 'encoder_sequence'}),
] )
if self.use_past:
__lowercase= {0: 'batch'}
__lowercase= {0: 'batch', 1: 'past_decoder_sequence + sequence'}
else:
__lowercase= {0: 'batch', 1: 'decoder_sequence'}
__lowercase= {0: 'batch', 1: 'decoder_sequence'}
if self.use_past:
self.fill_with_past_key_values_(lowerCAmelCase , direction='inputs' )
elif self.task == "causal-lm":
# TODO: figure this case out.
__lowercase= OrderedDict(
[
('input_ids', {0: 'batch', 1: 'encoder_sequence'}),
('attention_mask', {0: 'batch', 1: 'encoder_sequence'}),
] )
if self.use_past:
__lowercase, __lowercase= self.num_layers
for i in range(lowerCAmelCase ):
__lowercase= {0: 'batch', 2: 'past_sequence + sequence'}
__lowercase= {0: 'batch', 2: 'past_sequence + sequence'}
else:
__lowercase= OrderedDict(
[
('input_ids', {0: 'batch', 1: 'encoder_sequence'}),
('attention_mask', {0: 'batch', 1: 'encoder_sequence'}),
('decoder_input_ids', {0: 'batch', 1: 'decoder_sequence'}),
('decoder_attention_mask', {0: 'batch', 1: 'decoder_sequence'}),
] )
return common_inputs
@property
    def outputs(self) -> Mapping[str, Mapping[int, str]]:
if self.task in ["default", "seq2seq-lm"]:
__lowercase= super().outputs
else:
__lowercase= super(lowerCAmelCase , self ).outputs
if self.use_past:
__lowercase, __lowercase= self.num_layers
for i in range(lowerCAmelCase ):
__lowercase= {0: 'batch', 2: 'past_sequence + sequence'}
__lowercase= {0: 'batch', 2: 'past_sequence + sequence'}
return common_outputs
    def _generate_dummy_inputs_for_default_and_seq2seq_lm(self, tokenizer, batch_size=-1, seq_length=-1, is_pair=False, framework=None):
__lowercase= self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase )
# Generate decoder inputs
__lowercase= seq_length if not self.use_past else 1
__lowercase= self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase )
__lowercase= {f'decoder_{name}': tensor for name, tensor in decoder_inputs.items()}
__lowercase= dict(**lowerCAmelCase , **lowerCAmelCase )
if self.use_past:
if not is_torch_available():
raise ValueError('Cannot generate dummy past_keys inputs without PyTorch installed.' )
else:
import torch
__lowercase, __lowercase= common_inputs['input_ids'].shape
__lowercase= common_inputs['decoder_input_ids'].shape[1]
__lowercase, __lowercase= self.num_attention_heads
__lowercase= (
batch,
num_encoder_attention_heads,
encoder_seq_length,
self._config.hidden_size // num_encoder_attention_heads,
)
__lowercase= decoder_seq_length + 3
__lowercase= (
batch,
num_decoder_attention_heads,
decoder_past_length,
self._config.hidden_size // num_decoder_attention_heads,
)
__lowercase= torch.cat(
[common_inputs['decoder_attention_mask'], torch.ones(lowerCAmelCase , lowerCAmelCase )] , dim=1 )
__lowercase= []
# If the number of encoder and decoder layers are present in the model configuration, both are considered
__lowercase, __lowercase= self.num_layers
__lowercase= min(lowerCAmelCase , lowerCAmelCase )
__lowercase= max(lowerCAmelCase , lowerCAmelCase ) - min_num_layers
__lowercase= 'encoder' if num_encoder_layers > num_decoder_layers else 'decoder'
for _ in range(lowerCAmelCase ):
common_inputs["past_key_values"].append(
(
torch.zeros(lowerCAmelCase ),
torch.zeros(lowerCAmelCase ),
torch.zeros(lowerCAmelCase ),
torch.zeros(lowerCAmelCase ),
) )
# TODO: test this.
__lowercase= encoder_shape if remaining_side_name == 'encoder' else decoder_shape
for _ in range(lowerCAmelCase , lowerCAmelCase ):
common_inputs["past_key_values"].append((torch.zeros(lowerCAmelCase ), torch.zeros(lowerCAmelCase )) )
return common_inputs
    def _generate_dummy_inputs_for_causal_lm(self, tokenizer, batch_size=-1, seq_length=-1, is_pair=False, framework=None):
__lowercase= self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase )
if self.use_past:
if not is_torch_available():
raise ValueError('Cannot generate dummy past_keys inputs without PyTorch installed.' )
else:
import torch
__lowercase, __lowercase= common_inputs['input_ids'].shape
# Not using the same length for past_key_values
__lowercase= seqlen + 2
__lowercase, __lowercase= self.num_layers
__lowercase, __lowercase= self.num_attention_heads
__lowercase= (
batch,
num_encoder_attention_heads,
past_key_values_length,
self._config.hidden_size // num_encoder_attention_heads,
)
__lowercase= common_inputs['attention_mask'].dtype
__lowercase= torch.cat(
[common_inputs['attention_mask'], torch.ones(lowerCAmelCase , lowerCAmelCase , dtype=lowerCAmelCase )] , dim=1 )
__lowercase= [
(torch.zeros(lowerCAmelCase ), torch.zeros(lowerCAmelCase )) for _ in range(lowerCAmelCase )
]
return common_inputs
    def _generate_dummy_inputs_for_sequence_classification_and_question_answering(self, tokenizer, batch_size=-1, seq_length=-1, is_pair=False, framework=None):
# Copied from OnnxConfig.generate_dummy_inputs
# Did not use super(OnnxConfigWithPast, self).generate_dummy_inputs for code clarity.
# If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
__lowercase= compute_effective_axis_dimension(
lowerCAmelCase , fixed_dimension=OnnxConfig.default_fixed_batch , num_token_to_add=0 )
# If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
__lowercase= tokenizer.num_special_tokens_to_add(lowerCAmelCase )
__lowercase= compute_effective_axis_dimension(
lowerCAmelCase , fixed_dimension=OnnxConfig.default_fixed_sequence , num_token_to_add=lowerCAmelCase )
# Generate dummy inputs according to compute batch and sequence
__lowercase= [' '.join([tokenizer.unk_token] ) * seq_length] * batch_size
__lowercase= dict(tokenizer(lowerCAmelCase , return_tensors=lowerCAmelCase ) )
return common_inputs
    def generate_dummy_inputs(self, tokenizer, batch_size=-1, seq_length=-1, is_pair=False, framework=None):
if self.task in ["default", "seq2seq-lm"]:
            common_inputs = self._generate_dummy_inputs_for_default_and_seq2seq_lm(
lowerCAmelCase , batch_size=lowerCAmelCase , seq_length=lowerCAmelCase , is_pair=lowerCAmelCase , framework=lowerCAmelCase )
elif self.task == "causal-lm":
__lowercase= self._generate_dummy_inputs_for_causal_lm(
lowerCAmelCase , batch_size=lowerCAmelCase , seq_length=lowerCAmelCase , is_pair=lowerCAmelCase , framework=lowerCAmelCase )
else:
__lowercase= self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
lowerCAmelCase , batch_size=lowerCAmelCase , seq_length=lowerCAmelCase , is_pair=lowerCAmelCase , framework=lowerCAmelCase )
return common_inputs
    def _flatten_past_key_values_(self, flattened_output, name, idx, t):
if self.task in ["default", "seq2seq-lm"]:
__lowercase= super()._flatten_past_key_values_(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase )
else:
__lowercase= super(lowerCAmelCase , self )._flatten_past_key_values_(
lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase )
import unittest
from transformers import MraConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
if is_torch_available():
import torch
from transformers import (
MraForMaskedLM,
MraForMultipleChoice,
MraForQuestionAnswering,
MraForSequenceClassification,
MraForTokenClassification,
MraModel,
)
from transformers.models.mra.modeling_mra import MRA_PRETRAINED_MODEL_ARCHIVE_LIST
class MraModelTester:
    def __init__(
        self, parent, batch_size=2, seq_length=8, is_training=True, use_input_mask=True,
        use_token_type_ids=True, use_labels=True, vocab_size=99, hidden_size=16, num_hidden_layers=5,
        num_attention_heads=2, intermediate_size=36, hidden_act="gelu", hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0, max_position_embeddings=512, type_vocab_size=16,
        type_sequence_label_size=2, initializer_range=0.02, num_labels=3, num_choices=4, scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()

        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels

    def get_config(self):
        return MraConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            is_decoder=False,
            initializer_range=self.initializer_range,
        )

    def get_pipeline_config(self):
        config = self.get_config()
        config.vocab_size = 300
        return config

    def prepare_config_and_inputs_for_decoder(self):
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = self.prepare_config_and_inputs()
        config.is_decoder = True
        encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size])
        encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)
        return (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
            encoder_hidden_states,
            encoder_attention_mask,
        )

    def create_and_check_model(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = MraModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)
        result = model(input_ids, token_type_ids=token_type_ids)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_model_as_decoder(
        self,
        config,
        input_ids,
        token_type_ids,
        input_mask,
        sequence_labels,
        token_labels,
        choice_labels,
        encoder_hidden_states,
        encoder_attention_mask,
    ):
        config.add_cross_attention = True
        model = MraModel(config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
        )
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            encoder_hidden_states=encoder_hidden_states,
        )
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_masked_lm(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = MraForMaskedLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_for_question_answering(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = MraForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            start_positions=sequence_labels,
            end_positions=sequence_labels,
        )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def create_and_check_for_sequence_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = MraForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=sequence_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_for_token_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = MraForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_for_multiple_choice(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_choices = self.num_choices
        model = MraForMultipleChoice(config=config)
        model.to(torch_device)
        model.eval()
        multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_token_type_ids = token_type_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        result = model(
            multiple_choice_inputs_ids,
            attention_mask=multiple_choice_input_mask,
            token_type_ids=multiple_choice_token_type_ids,
            labels=choice_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class MraModelTest(ModelTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            MraModel,
            MraForMaskedLM,
            MraForMultipleChoice,
            MraForQuestionAnswering,
            MraForSequenceClassification,
            MraForTokenClassification,
        )
        if is_torch_available()
        else ()
    )
    test_pruning = False
    test_headmasking = False
    test_torchscript = False
    has_attentions = False
    all_generative_model_classes = ()

    def setUp(self):
        self.model_tester = MraModelTester(self)
        self.config_tester = ConfigTester(self, config_class=MraConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_model_various_embeddings(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config_and_inputs[0].position_embedding_type = type
            self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_multiple_choice(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in MRA_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = MraModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    @unittest.skip(reason="MRA does not output attentions")
    def test_attention_outputs(self):
        return
@require_torch
class MraModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_no_head(self):
        model = MraModel.from_pretrained("uw-madison/mra-base-512-4")
        input_ids = torch.arange(256).unsqueeze(0)

        with torch.no_grad():
            output = model(input_ids)[0]

        expected_shape = torch.Size((1, 256, 768))
        self.assertEqual(output.shape, expected_shape)

        expected_slice = torch.tensor(
            [[[-0.0140, 0.0830, -0.0381], [0.1546, 0.1402, 0.0220], [0.1162, 0.0851, 0.0165]]]
        )

        self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4))

    @slow
    def test_inference_masked_lm(self):
        model = MraForMaskedLM.from_pretrained("uw-madison/mra-base-512-4")
        input_ids = torch.arange(256).unsqueeze(0)

        with torch.no_grad():
            output = model(input_ids)[0]

        vocab_size = 50265

        expected_shape = torch.Size((1, 256, vocab_size))
        self.assertEqual(output.shape, expected_shape)

        expected_slice = torch.tensor(
            [[[9.2595, -3.6038, 11.8819], [9.3869, -3.2693, 11.0956], [11.8524, -3.4938, 13.1210]]]
        )

        self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4))

    @slow
    def test_inference_masked_lm_long_input(self):
        model = MraForMaskedLM.from_pretrained("uw-madison/mra-base-4096-8-d3")
        input_ids = torch.arange(4096).unsqueeze(0)

        with torch.no_grad():
            output = model(input_ids)[0]

        vocab_size = 50265

        expected_shape = torch.Size((1, 4096, vocab_size))
        self.assertEqual(output.shape, expected_shape)

        expected_slice = torch.tensor(
            [[[5.4789, -2.3564, 7.5064], [7.9067, -1.3369, 9.9668], [9.0712, -1.8106, 7.0380]]]
        )

        self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4))
from math import factorial, radians
def sin(angle_in_degrees: float, accuracy: int = 18, rounded_values_count: int = 10) -> float:
    """
    Implement the sine function via its truncated Maclaurin series.

    >>> sin(0.0)
    0.0
    >>> sin(90.0)
    1.0
    """
    # Simplify the angle to be between 360 and -360 degrees.
    angle_in_degrees = angle_in_degrees - ((angle_in_degrees // 360.0) * 360.0)

    # Converting from degrees to radians
    angle_in_radians = radians(angle_in_degrees)

    result = angle_in_radians
    a = 3
    b = -1

    for _ in range(accuracy):
        result += (b * (angle_in_radians**a)) / factorial(a)

        b = -b  # One positive term and the next will be negative and so on...
        a += 2  # Increased by 2 for every term.

    return round(result, rounded_values_count)


if __name__ == "__main__":
    __import__("doctest").testmod()
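

# Quick sanity check against the standard library (illustrative, not part of
# the original file): the truncated Maclaurin series should agree with
# math.sin to well within the rounding used above.
#
#     import math
#
#     for deg in (0.0, 30.0, 90.0, 137.5):
#         assert abs(sin(deg) - math.sin(math.radians(deg))) < 1e-9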
"""
Shortest remaining time first (preemptive SJF) scheduling.
Arrival times and burst times are read interactively; use spaces to
separate the two values entered for each process.
"""
from __future__ import annotations

import pandas as pd


def calculate_waitingtime(
    arrival_time: list[int], burst_time: list[int], no_of_processes: int
) -> list[int]:
    """
    Calculate the waiting time of each process.
    """
    remaining_time = [0] * no_of_processes
    waiting_time = [0] * no_of_processes
    # Copy the burst time into remaining_time[]
    for i in range(no_of_processes):
        remaining_time[i] = burst_time[i]

    complete = 0
    increment_time = 0
    minm = 999999999
    short = 0
    check = False

    # Process until all processes are completed
    while complete != no_of_processes:
        for j in range(no_of_processes):
            if arrival_time[j] <= increment_time and remaining_time[j] > 0:
                if remaining_time[j] < minm:
                    minm = remaining_time[j]
                    short = j
                    check = True

        if not check:
            increment_time += 1
            continue
        remaining_time[short] -= 1

        minm = remaining_time[short]
        if minm == 0:
            minm = 999999999

        if remaining_time[short] == 0:
            complete += 1
            check = False

            # Find finish time of current process
            finish_time = increment_time + 1

            # Calculate waiting time
            finar = finish_time - arrival_time[short]
            waiting_time[short] = finar - burst_time[short]

            if waiting_time[short] < 0:
                waiting_time[short] = 0

        # Increment time
        increment_time += 1
    return waiting_time


def calculate_turnaroundtime(
    burst_time: list[int], no_of_processes: int, waiting_time: list[int]
) -> list[int]:
    """
    Calculate the turn around time of each process.
    """
    turn_around_time = [0] * no_of_processes
    for i in range(no_of_processes):
        turn_around_time[i] = burst_time[i] + waiting_time[i]
    return turn_around_time


def calculate_average_times(
    waiting_time: list[int], turn_around_time: list[int], no_of_processes: int
) -> None:
    """
    Print the average waiting time and the average turn around time.
    """
    total_waiting_time = 0
    total_turn_around_time = 0
    for i in range(no_of_processes):
        total_waiting_time = total_waiting_time + waiting_time[i]
        total_turn_around_time = total_turn_around_time + turn_around_time[i]
    print(f"Average waiting time = {total_waiting_time / no_of_processes:.5f}")
    print("Average turn around time =", total_turn_around_time / no_of_processes)


if __name__ == "__main__":
    print("Enter how many process you want to analyze")
    no_of_processes = int(input())
    burst_time = [0] * no_of_processes
    arrival_time = [0] * no_of_processes
    processes = list(range(1, no_of_processes + 1))

    for i in range(no_of_processes):
        print("Enter the arrival time and burst time for process:--" + str(i + 1))
        arrival_time[i], burst_time[i] = map(int, input().split())

    waiting_time = calculate_waitingtime(arrival_time, burst_time, no_of_processes)

    bt = burst_time
    n = no_of_processes
    wt = waiting_time
    turn_around_time = calculate_turnaroundtime(bt, n, wt)

    calculate_average_times(waiting_time, turn_around_time, no_of_processes)

    fcfs = pd.DataFrame(
        list(zip(processes, burst_time, arrival_time, waiting_time, turn_around_time)),
        columns=[
            "Process",
            "BurstTime",
            "ArrivalTime",
            "WaitingTime",
            "TurnAroundTime",
        ],
    )

    # Printing the dataFrame
    pd.set_option("display.max_rows", fcfs.shape[0] + 1)
    print(fcfs)
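

# Non-interactive example (illustrative, not part of the original script):
# three processes arriving at t = 0, 1, 2 with burst times 3, 1, 2.
#
#     wt = calculate_waitingtime([0, 1, 2], [3, 1, 2], 3)
#     tat = calculate_turnaroundtime([3, 1, 2], 3, wt)
#     calculate_average_times(wt, tat, 3)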
lowerCAmelCase = [
9_9_9,
8_0_0,
7_9_9,
6_0_0,
5_9_9,
5_0_0,
4_0_0,
3_9_9,
3_7_7,
3_5_5,
3_3_3,
3_1_1,
2_8_8,
2_6_6,
2_4_4,
2_2_2,
2_0_0,
1_9_9,
1_7_7,
1_5_5,
1_3_3,
1_1_1,
8_8,
6_6,
4_4,
2_2,
0,
]
lowerCAmelCase = [
9_9_9,
9_7_6,
9_5_2,
9_2_8,
9_0_5,
8_8_2,
8_5_8,
8_5_7,
8_1_0,
7_6_2,
7_1_5,
7_1_4,
5_7_2,
4_2_9,
4_2_8,
2_8_6,
2_8_5,
2_3_8,
1_9_0,
1_4_3,
1_4_2,
1_1_8,
9_5,
7_1,
4_7,
2_4,
0,
]
lowerCAmelCase = [
9_9_9,
9_8_8,
9_7_7,
9_6_6,
9_5_5,
9_4_4,
9_3_3,
9_2_2,
9_1_1,
9_0_0,
8_9_9,
8_7_9,
8_5_9,
8_4_0,
8_2_0,
8_0_0,
7_9_9,
7_6_6,
7_3_3,
7_0_0,
6_9_9,
6_5_0,
6_0_0,
5_9_9,
5_0_0,
4_9_9,
4_0_0,
3_9_9,
3_5_0,
3_0_0,
2_9_9,
2_6_6,
2_3_3,
2_0_0,
1_9_9,
1_7_9,
1_5_9,
1_4_0,
1_2_0,
1_0_0,
9_9,
8_8,
7_7,
6_6,
5_5,
4_4,
3_3,
2_2,
1_1,
0,
]
lowerCAmelCase = [
9_9_9,
9_9_5,
9_9_2,
9_8_9,
9_8_5,
9_8_1,
9_7_8,
9_7_5,
9_7_1,
9_6_7,
9_6_4,
9_6_1,
9_5_7,
9_5_6,
9_5_1,
9_4_7,
9_4_2,
9_3_7,
9_3_3,
9_2_8,
9_2_3,
9_1_9,
9_1_4,
9_1_3,
9_0_8,
9_0_3,
8_9_7,
8_9_2,
8_8_7,
8_8_1,
8_7_6,
8_7_1,
8_7_0,
8_6_4,
8_5_8,
8_5_2,
8_4_6,
8_4_0,
8_3_4,
8_2_8,
8_2_7,
8_2_0,
8_1_3,
8_0_6,
7_9_9,
7_9_2,
7_8_5,
7_8_4,
7_7_7,
7_7_0,
7_6_3,
7_5_6,
7_4_9,
7_4_2,
7_4_1,
7_3_3,
7_2_4,
7_1_6,
7_0_7,
6_9_9,
6_9_8,
6_8_8,
6_7_7,
6_6_6,
6_5_6,
6_5_5,
6_4_5,
6_3_4,
6_2_3,
6_1_3,
6_1_2,
5_9_8,
5_8_4,
5_7_0,
5_6_9,
5_5_5,
5_4_1,
5_2_7,
5_2_6,
5_0_5,
4_8_4,
4_8_3,
4_6_2,
4_4_0,
4_3_9,
3_9_6,
3_9_5,
3_5_2,
3_5_1,
3_0_8,
3_0_7,
2_6_4,
2_6_3,
2_2_0,
2_1_9,
1_7_6,
1_3_2,
8_8,
4_4,
0,
]
lowerCAmelCase = [
9_9_9,
9_9_7,
9_9_5,
9_9_2,
9_9_0,
9_8_8,
9_8_6,
9_8_4,
9_8_1,
9_7_9,
9_7_7,
9_7_5,
9_7_2,
9_7_0,
9_6_8,
9_6_6,
9_6_4,
9_6_1,
9_5_9,
9_5_7,
9_5_6,
9_5_4,
9_5_1,
9_4_9,
9_4_6,
9_4_4,
9_4_1,
9_3_9,
9_3_6,
9_3_4,
9_3_1,
9_2_9,
9_2_6,
9_2_4,
9_2_1,
9_1_9,
9_1_6,
9_1_4,
9_1_3,
9_1_0,
9_0_7,
9_0_5,
9_0_2,
8_9_9,
8_9_6,
8_9_3,
8_9_1,
8_8_8,
8_8_5,
8_8_2,
8_7_9,
8_7_7,
8_7_4,
8_7_1,
8_7_0,
8_6_7,
8_6_4,
8_6_1,
8_5_8,
8_5_5,
8_5_2,
8_4_9,
8_4_6,
8_4_3,
8_4_0,
8_3_7,
8_3_4,
8_3_1,
8_2_8,
8_2_7,
8_2_4,
8_2_1,
8_1_7,
8_1_4,
8_1_1,
8_0_8,
8_0_4,
8_0_1,
7_9_8,
7_9_5,
7_9_1,
7_8_8,
7_8_5,
7_8_4,
7_8_0,
7_7_7,
7_7_4,
7_7_0,
7_6_6,
7_6_3,
7_6_0,
7_5_6,
7_5_2,
7_4_9,
7_4_6,
7_4_2,
7_4_1,
7_3_7,
7_3_3,
7_3_0,
7_2_6,
7_2_2,
7_1_8,
7_1_4,
7_1_0,
7_0_7,
7_0_3,
6_9_9,
6_9_8,
6_9_4,
6_9_0,
6_8_5,
6_8_1,
6_7_7,
6_7_3,
6_6_9,
6_6_4,
6_6_0,
6_5_6,
6_5_5,
6_5_0,
6_4_6,
6_4_1,
6_3_6,
6_3_2,
6_2_7,
6_2_2,
6_1_8,
6_1_3,
6_1_2,
6_0_7,
6_0_2,
5_9_6,
5_9_1,
5_8_6,
5_8_0,
5_7_5,
5_7_0,
5_6_9,
5_6_3,
5_5_7,
5_5_1,
5_4_5,
5_3_9,
5_3_3,
5_2_7,
5_2_6,
5_1_9,
5_1_2,
5_0_5,
4_9_8,
4_9_1,
4_8_4,
4_8_3,
4_7_4,
4_6_6,
4_5_7,
4_4_9,
4_4_0,
4_3_9,
4_2_8,
4_1_8,
4_0_7,
3_9_6,
3_9_5,
3_8_1,
3_6_6,
3_5_2,
3_5_1,
3_3_0,
3_0_8,
3_0_7,
2_8_6,
2_6_4,
2_6_3,
2_4_2,
2_2_0,
2_1_9,
1_7_6,
1_7_5,
1_3_2,
1_3_1,
8_8,
4_4,
0,
]
lowerCAmelCase = [
9_9_9,
9_9_1,
9_8_2,
9_7_4,
9_6_6,
9_5_8,
9_5_0,
9_4_1,
9_3_3,
9_2_5,
9_1_6,
9_0_8,
9_0_0,
8_9_9,
8_7_4,
8_5_0,
8_2_5,
8_0_0,
7_9_9,
7_0_0,
6_0_0,
5_0_0,
4_0_0,
3_0_0,
2_0_0,
1_0_0,
0,
]
lowerCAmelCase = [
9_9_9,
9_9_2,
9_8_5,
9_7_8,
9_7_1,
9_6_4,
9_5_7,
9_4_9,
9_4_2,
9_3_5,
9_2_8,
9_2_1,
9_1_4,
9_0_7,
9_0_0,
8_9_9,
8_7_9,
8_5_9,
8_4_0,
8_2_0,
8_0_0,
7_9_9,
7_6_6,
7_3_3,
7_0_0,
6_9_9,
6_5_0,
6_0_0,
5_9_9,
5_0_0,
4_9_9,
4_0_0,
3_9_9,
3_0_0,
2_9_9,
2_0_0,
1_9_9,
1_0_0,
9_9,
0,
]
lowerCAmelCase = [
9_9_9,
9_9_6,
9_9_2,
9_8_9,
9_8_5,
9_8_2,
9_7_9,
9_7_5,
9_7_2,
9_6_8,
9_6_5,
9_6_1,
9_5_8,
9_5_5,
9_5_1,
9_4_8,
9_4_4,
9_4_1,
9_3_8,
9_3_4,
9_3_1,
9_2_7,
9_2_4,
9_2_0,
9_1_7,
9_1_4,
9_1_0,
9_0_7,
9_0_3,
9_0_0,
8_9_9,
8_9_1,
8_8_4,
8_7_6,
8_6_9,
8_6_1,
8_5_3,
8_4_6,
8_3_8,
8_3_0,
8_2_3,
8_1_5,
8_0_8,
8_0_0,
7_9_9,
7_8_8,
7_7_7,
7_6_6,
7_5_5,
7_4_4,
7_3_3,
7_2_2,
7_1_1,
7_0_0,
6_9_9,
6_8_8,
6_7_7,
6_6_6,
6_5_5,
6_4_4,
6_3_3,
6_2_2,
6_1_1,
6_0_0,
5_9_9,
5_8_5,
5_7_1,
5_5_7,
5_4_2,
5_2_8,
5_1_4,
5_0_0,
4_9_9,
4_8_5,
4_7_1,
4_5_7,
4_4_2,
4_2_8,
4_1_4,
4_0_0,
3_9_9,
3_7_9,
3_5_9,
3_4_0,
3_2_0,
3_0_0,
2_9_9,
2_7_9,
2_5_9,
2_4_0,
2_2_0,
2_0_0,
1_9_9,
1_6_6,
1_3_3,
1_0_0,
9_9,
6_6,
3_3,
0,
]
"""simple docstring"""
import logging
import math
from functools import partial
from typing import Any, Callable, Dict, Iterable, List, Optional, Sequence, Tuple, Union
import torch
from .tensor_utils import tensor_tree_map, tree_map
def _fetch_dims(tree: Union[dict, list, tuple, torch.Tensor]) -> List[Tuple[int, ...]]:
    shapes = []
    if isinstance(tree, dict):
        for v in tree.values():
            shapes.extend(_fetch_dims(v))
    elif isinstance(tree, (list, tuple)):
        for t in tree:
            shapes.extend(_fetch_dims(t))
    elif isinstance(tree, torch.Tensor):
        shapes.append(tree.shape)
    else:
        raise ValueError("Not supported")

    return shapes
@torch.jit.ignore
def _flat_idx_to_idx(flat_idx: int, dims: Tuple[int, ...]) -> Tuple[int, ...]:
    idx = []
    for d in reversed(dims):
        idx.append(flat_idx % d)
        flat_idx = flat_idx // d

    return tuple(reversed(idx))
@torch.jit.ignore
def _get_minimal_slice_set(
    start: Sequence[int],
    end: Sequence[int],
    dims: Sequence[int],
    start_edges: Optional[Sequence[bool]] = None,
    end_edges: Optional[Sequence[bool]] = None,
) -> List[Tuple[slice, ...]]:
    """
    Produces an ordered sequence of tensor slices that, when applied in
    sequence to a tensor of shape `dims`, covers every leaf in the contiguous
    flat range [start, end]. `end` is INCLUSIVE.
    """

    # start_edges and end_edges both indicate whether, starting from any given
    # dimension, the start/end index is at the top/bottom edge of the
    # corresponding tensor, modeled as a tree
    def reduce_edge_list(l: List[bool]) -> None:
        tally = True
        for i in range(len(l)):
            reversed_idx = -1 * (i + 1)
            l[reversed_idx] &= tally
            tally = l[reversed_idx]

    if start_edges is None:
        start_edges = [s == 0 for s in start]
        reduce_edge_list(start_edges)
    if end_edges is None:
        end_edges = [e == (d - 1) for e, d in zip(end, dims)]
        reduce_edge_list(end_edges)

    # Base cases. Either start/end are empty and we're done, or the final,
    # one-dimensional tensor can be simply sliced
    if len(start) == 0:
        return [()]
    elif len(start) == 1:
        return [(slice(start[0], end[0] + 1),)]

    slices: List[Tuple[slice, ...]] = []
    path_list: List[slice] = []

    # Dimensions common to start and end can be selected directly
    for s, e in zip(start, end):
        if s == e:
            path_list.append(slice(s, s + 1))
        else:
            break

    path: Tuple[slice, ...] = tuple(path_list)
    divergence_idx = len(path_list)

    # start == end, and we're done
    if divergence_idx == len(dims):
        return [path]

    def upper() -> Tuple[Tuple[slice, ...], ...]:
        assert start_edges is not None
        assert end_edges is not None

        sdi = start[divergence_idx]
        return tuple(
            path + (slice(sdi, sdi + 1),) + s
            for s in _get_minimal_slice_set(
                start[divergence_idx + 1 :],
                [d - 1 for d in dims[divergence_idx + 1 :]],
                dims[divergence_idx + 1 :],
                start_edges=start_edges[divergence_idx + 1 :],
                end_edges=[True for _ in end_edges[divergence_idx + 1 :]],
            )
        )

    def lower() -> Tuple[Tuple[slice, ...], ...]:
        assert start_edges is not None
        assert end_edges is not None

        edi = end[divergence_idx]
        return tuple(
            path + (slice(edi, edi + 1),) + s
            for s in _get_minimal_slice_set(
                [0 for _ in start[divergence_idx + 1 :]],
                end[divergence_idx + 1 :],
                dims[divergence_idx + 1 :],
                start_edges=[True for _ in start_edges[divergence_idx + 1 :]],
                end_edges=end_edges[divergence_idx + 1 :],
            )
        )

    # If both start and end are at the edges of the subtree rooted at
    # divergence_idx, we can just select the whole subtree at once
    if start_edges[divergence_idx] and end_edges[divergence_idx]:
        slices.append(path + (slice(start[divergence_idx], end[divergence_idx] + 1),))
    # If just start is at the edge, we can grab almost all of the subtree,
    # treating only the ragged bottom edge as an edge case
    elif start_edges[divergence_idx]:
        slices.append(path + (slice(start[divergence_idx], end[divergence_idx]),))
        slices.extend(lower())
    # Analogous to the previous case, but the top is ragged this time
    elif end_edges[divergence_idx]:
        slices.extend(upper())
        slices.append(path + (slice(start[divergence_idx] + 1, end[divergence_idx] + 1),))
    # If both sides of the range are ragged, we need to handle both sides
    # separately. If there's contiguous meat in between them, we can index it
    # in one big chunk
    else:
        slices.extend(upper())
        middle_ground = end[divergence_idx] - start[divergence_idx]
        if middle_ground > 1:
            slices.append(path + (slice(start[divergence_idx] + 1, end[divergence_idx]),))
        slices.extend(lower())

    return slices
@torch.jit.ignore
def _chunk_slice(t: torch.Tensor, flat_start: int, flat_end: int, no_batch_dims: int) -> torch.Tensor:
    batch_dims = t.shape[:no_batch_dims]
    start_idx = list(_flat_idx_to_idx(flat_start, batch_dims))
    # _get_minimal_slice_set is inclusive
    end_idx = list(_flat_idx_to_idx(flat_end - 1, batch_dims))

    # Get an ordered list of slices to perform
    slices = _get_minimal_slice_set(
        start_idx,
        end_idx,
        batch_dims,
    )

    sliced_tensors = [t[s] for s in slices]

    return torch.cat([s.view((-1,) + t.shape[no_batch_dims:]) for s in sliced_tensors])
def chunk_layer(layer, inputs, chunk_size, no_batch_dims, low_mem=False, _out=None, _add_into_out=False):
    """Apply `layer` to `inputs` in chunks of `chunk_size` along the flattened batch dims,
    stitching the chunked outputs back into a single result."""
    if not (len(inputs) > 0):
        raise ValueError("Must provide at least one input")
    initial_dims = [shape[:no_batch_dims] for shape in _fetch_dims(inputs)]
    orig_batch_dims = tuple([max(s) for s in zip(*initial_dims)])

    def _prep_inputs(t) -> torch.Tensor:
        if not low_mem:
            if not sum(t.shape[:no_batch_dims]) == no_batch_dims:
                t = t.expand(orig_batch_dims + t.shape[no_batch_dims:])
            t = t.reshape(-1, *t.shape[no_batch_dims:])
        else:
            t = t.expand(orig_batch_dims + t.shape[no_batch_dims:])
        return t

    prepped_inputs = tensor_tree_map(_prep_inputs, inputs)
    prepped_outputs = None
    if _out is not None:
        prepped_outputs = tensor_tree_map(lambda t: t.view([-1] + list(t.shape[no_batch_dims:])), _out)
    flat_batch_dim = 1
    for d in orig_batch_dims:
        flat_batch_dim *= d
    no_chunks = flat_batch_dim // chunk_size + (flat_batch_dim % chunk_size != 0)

    def _select_chunk(t) -> torch.Tensor:
        # Closes over the loop variable i below; size-1 dims are broadcast as-is
        return t[i : i + chunk_size] if t.shape[0] != 1 else t

    i = 0
    out = prepped_outputs
    for _ in range(no_chunks):
        # Chunk the input
        if not low_mem:
            select_chunk = _select_chunk
        else:
            select_chunk = partial(
                _chunk_slice, flat_start=i, flat_end=min(flat_batch_dim, i + chunk_size), no_batch_dims=len(orig_batch_dims), )
        chunks = tensor_tree_map(select_chunk, prepped_inputs)
        # Run the layer on the chunk
        output_chunk = layer(**chunks)
        # Allocate space for the output
        if out is None:
            out = tensor_tree_map(lambda t: t.new_zeros((flat_batch_dim,) + t.shape[1:]), output_chunk)
        # Put the chunk in its pre-allocated space
        if isinstance(output_chunk, dict):
            def assign(d1, d2) -> None:
                for k, v in d1.items():
                    if isinstance(v, dict):
                        assign(v, d2[k])
                    else:
                        if _add_into_out:
                            v[i : i + chunk_size] += d2[k]
                        else:
                            v[i : i + chunk_size] = d2[k]
            assign(out, output_chunk)
        elif isinstance(output_chunk, tuple):
            for x1, x2 in zip(out, output_chunk):
                if _add_into_out:
                    x1[i : i + chunk_size] += x2
                else:
                    x1[i : i + chunk_size] = x2
        elif isinstance(output_chunk, torch.Tensor):
            if _add_into_out:
                out[i : i + chunk_size] += output_chunk
            else:
                out[i : i + chunk_size] = output_chunk
        else:
            raise ValueError("Not supported")
        i += chunk_size
    out = tensor_tree_map(lambda t: t.view(orig_batch_dims + t.shape[1:]), out)
    return out
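# --- Added illustration, not part of the original module: chunking a toy
# layer over two batch dims; assumes only torch from the imports above.
def _demo_chunk_layer():
    layer = lambda x: {"y": x * 2}
    inputs = {"x": torch.ones(4, 6, 8)}  # batch dims (4, 6)
    out = chunk_layer(layer, inputs, chunk_size=5, no_batch_dims=2)
    assert out["y"].shape == (4, 6, 8) and bool((out["y"] == 2).all())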
class ChunkSizeTuner:
    def __init__(self, max_chunk_size=512):
        self.max_chunk_size = max_chunk_size
        self.cached_chunk_size = None
        self.cached_arg_data = None

    def _determine_favorable_chunk_size(self, fn, args, min_chunk_size):
        logging.info("Tuning chunk size...")
        if min_chunk_size >= self.max_chunk_size:
            return min_chunk_size
        candidates = [2**l for l in range(int(math.log(self.max_chunk_size, 2)) + 1)]
        candidates = [c for c in candidates if c > min_chunk_size]
        candidates = [min_chunk_size] + candidates
        candidates[-1] += 4

        def test_chunk_size(chunk_size) -> bool:
            try:
                with torch.no_grad():
                    fn(*args, chunk_size=chunk_size)
                return True
            except RuntimeError:
                return False

        min_viable_chunk_size_index = 0
        i = len(candidates) - 1
        while i > min_viable_chunk_size_index:
            viable = test_chunk_size(candidates[i])
            if not viable:
                i = (min_viable_chunk_size_index + i) // 2
            else:
                min_viable_chunk_size_index = i
                i = (i + len(candidates) - 1) // 2
        return candidates[min_viable_chunk_size_index]

    def _compare_arg_caches(self, ac1, ac2) -> bool:
        consistent = True
        for a1, a2 in zip(ac1, ac2):
            assert type(a1) == type(a2)
            if isinstance(a1, (list, tuple)):
                consistent &= self._compare_arg_caches(a1, a2)
            elif isinstance(a1, dict):
                a1_items = [v for _, v in sorted(a1.items(), key=lambda x: x[0])]
                a2_items = [v for _, v in sorted(a2.items(), key=lambda x: x[0])]
                consistent &= self._compare_arg_caches(a1_items, a2_items)
            else:
                consistent &= a1 == a2
        return consistent

    def tune_chunk_size(self, representative_fn, args, min_chunk_size) -> int:
        consistent = True
        arg_data = tree_map(lambda a: a.shape if isinstance(a, torch.Tensor) else a, args, object)
        if self.cached_arg_data is not None:
            # If args have changed shape/value, we need to re-tune
            assert len(self.cached_arg_data) == len(arg_data)
            consistent = self._compare_arg_caches(self.cached_arg_data, arg_data)
        else:
            # Otherwise, we can reuse the precomputed value
            consistent = False
        if not consistent:
            self.cached_chunk_size = self._determine_favorable_chunk_size(
                representative_fn, args, min_chunk_size, )
            self.cached_arg_data = arg_data
        assert self.cached_chunk_size is not None
        return self.cached_chunk_size
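# --- Added illustration, not part of the original module: exercising the
# tuner with a stand-in function that "OOMs" above a threshold.
def _demo_chunk_size_tuner():
    def fwd(x, chunk_size=None):
        if chunk_size is not None and chunk_size > 64:
            raise RuntimeError("out of memory (simulated)")
        return x
    tuner = ChunkSizeTuner(max_chunk_size=512)
    best = tuner.tune_chunk_size(fwd, (torch.ones(2, 2),), min_chunk_size=1)
    assert best <= 64  # binary search settles on the largest viable candidate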
| 367
|
from __future__ import annotations
import numpy as np
def relu(vector) -> np.ndarray:
    """Apply the ReLU activation max(0, x) elementwise."""
    return np.maximum(0, vector)
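# --- Added illustration, not in the original file: the matching subgradient,
# in the same vectorised numpy style.
def relu_derivative(vector) -> np.ndarray:
    """Elementwise ReLU derivative: 1 where x > 0, else 0 (0 chosen at x = 0)."""
    return (np.asarray(vector) > 0).astype(float)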
if __name__ == "__main__":
print(np.array(relu([-1, 0, 5]))) # --> [0, 0, 5]
| 304
| 0
|
import importlib
import os
from dataclasses import dataclass
from enum import Enum
from typing import Any, Dict, Optional, Union
import torch
from ..utils import BaseOutput
SCHEDULER_CONFIG_NAME = "scheduler_config.json"


class KarrasDiffusionSchedulers(Enum):
    DDIMScheduler = 1
    DDPMScheduler = 2
    PNDMScheduler = 3
    LMSDiscreteScheduler = 4
    EulerDiscreteScheduler = 5
    HeunDiscreteScheduler = 6
    EulerAncestralDiscreteScheduler = 7
    DPMSolverMultistepScheduler = 8
    DPMSolverSinglestepScheduler = 9
    KDPM2DiscreteScheduler = 10
    KDPM2AncestralDiscreteScheduler = 11
    DEISMultistepScheduler = 12
    UniPCMultistepScheduler = 13
    DPMSolverSDEScheduler = 14


@dataclass
class SchedulerOutput(BaseOutput):
    """Output of a scheduler's step function: the sample for the previous timestep."""

    prev_sample: torch.FloatTensor


class SchedulerMixin:
    """Base mixin giving schedulers config-driven save/load and compatibility discovery."""

    config_name = SCHEDULER_CONFIG_NAME
    _compatibles = []
    has_compatibles = True

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path=None, subfolder=None, return_unused_kwargs=False, **kwargs):
        config, kwargs, commit_hash = cls.load_config(
            pretrained_model_name_or_path=pretrained_model_name_or_path, subfolder=subfolder, return_unused_kwargs=True, return_commit_hash=True, **kwargs, )
        return cls.from_config(config, return_unused_kwargs=return_unused_kwargs, **kwargs)

    def save_pretrained(self, save_directory, push_to_hub=False, **kwargs):
        self.save_config(save_directory=save_directory, push_to_hub=push_to_hub, **kwargs)

    @property
    def compatibles(self):
        return self._get_compatibles()

    @classmethod
    def _get_compatibles(cls):
        compatible_classes_str = list(set([cls.__name__] + cls._compatibles))
        diffusers_library = importlib.import_module(__name__.split(".")[0])
        compatible_classes = [
            getattr(diffusers_library, c) for c in compatible_classes_str if hasattr(diffusers_library, c)
        ]
        return compatible_classes
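# --- Added illustration, hypothetical class: a minimal concrete scheduler
# built on the mixin above. ConfigMixin and register_to_config are diffusers'
# real configuration utilities; the class itself is a stub, not a sampling
# algorithm.
from ..configuration_utils import ConfigMixin, register_to_config


class _DemoScheduler(SchedulerMixin, ConfigMixin):
    config_name = SCHEDULER_CONFIG_NAME

    @register_to_config
    def __init__(self, num_train_timesteps: int = 1000):
        # save_pretrained()/from_pretrained() round-trip this field through
        # scheduler_config.json automatically.
        self.num_train_timesteps = num_train_timesteps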
| 368
|
def solution(power: int = 1_000) -> int:
    """Return the sum of the decimal digits of 2**power."""
    num = 2**power
    string_num = str(num)
    list_num = list(string_num)
    sum_of_num = 0
    for i in list_num:
        sum_of_num += int(i)
    return sum_of_num
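# --- Added illustration, not in the original file: the same computation as a
# one-liner, handy as a cross-check.
def solution_oneliner(power: int = 1_000) -> int:
    return sum(int(digit) for digit in str(2**power))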
if __name__ == "__main__":
    power = int(input("Enter the power of 2: ").strip())
    print("2 ^ ", power, " = ", 2**power)
    result = solution(power)
    print("Sum of the digits is: ", result)
| 304
| 0
|
import unittest
import numpy as np
from transformers.testing_utils import is_flaky, require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DonutImageProcessor
class DonutImageProcessingTester(unittest.TestCase):
    def __init__(self, parent, batch_size=7, num_channels=3, image_size=18, min_resolution=30, max_resolution=400, do_resize=True, size=None, do_thumbnail=True, do_align_axis=False, do_pad=True, do_normalize=True, image_mean=[0.5, 0.5, 0.5], image_std=[0.5, 0.5, 0.5], ):
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size if size is not None else {"height": 18, "width": 20}
        self.do_thumbnail = do_thumbnail
        self.do_align_axis = do_align_axis
        self.do_pad = do_pad
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std

    def prepare_image_processor_dict(self):
        return {
            "do_resize": self.do_resize,
            "size": self.size,
            "do_thumbnail": self.do_thumbnail,
            "do_align_long_axis": self.do_align_axis,
            "do_pad": self.do_pad,
            "do_normalize": self.do_normalize,
            "image_mean": self.image_mean,
            "image_std": self.image_std,
        }
@require_torch
@require_vision
class DonutImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = DonutImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = DonutImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))
        self.assertTrue(hasattr(image_processing, "do_thumbnail"))
        self.assertTrue(hasattr(image_processing, "do_align_long_axis"))
        self.assertTrue(hasattr(image_processing, "do_pad"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"height": 18, "width": 20})
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42)
        self.assertEqual(image_processor.size, {"height": 42, "width": 42})
        # Previous config had dimensions in (width, height) order
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=(42, 84))
        self.assertEqual(image_processor.size, {"height": 84, "width": 42})

    def test_batch_feature(self):
        pass
    @is_flaky()
    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)
        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape, (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ), )
        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape, (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ), )

    @is_flaky()
    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)
        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape, (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ), )
        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape, (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ), )

    @is_flaky()
    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)
        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape, (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ), )
        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape, (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ), )
| 369
|
import logging
import os
from dataclasses import dataclass, field
from typing import Dict, Optional
import datasets
import numpy as np
import tensorflow as tf
from transformers import (
AutoConfig,
AutoTokenizer,
EvalPrediction,
HfArgumentParser,
PreTrainedTokenizer,
TFAutoModelForSequenceClassification,
TFTrainer,
TFTrainingArguments,
)
from transformers.utils import logging as hf_logging
hf_logging.set_verbosity_info()
hf_logging.enable_default_handler()
hf_logging.enable_explicit_format()
def get_tfds(train_file, eval_file, test_file, tokenizer, label_column_id, max_seq_length=None):
    """Build train/val/test tf.data datasets (and the label2id mapping) from CSV files."""
    files = {}
    if train_file is not None:
        files[datasets.Split.TRAIN] = [train_file]
    if eval_file is not None:
        files[datasets.Split.VALIDATION] = [eval_file]
    if test_file is not None:
        files[datasets.Split.TEST] = [test_file]
    ds = datasets.load_dataset("csv", data_files=files)
    features_name = list(ds[list(files.keys())[0]].features.keys())
    label_name = features_name.pop(label_column_id)
    label_list = list(set(ds[list(files.keys())[0]][label_name]))
    label2id = {label: i for i, label in enumerate(label_list)}
    input_names = tokenizer.model_input_names
    transformed_ds = {}
    if len(features_name) == 1:
        for k in files.keys():
            transformed_ds[k] = ds[k].map(
                lambda example: tokenizer.batch_encode_plus(
                    example[features_name[0]], truncation=True, max_length=max_seq_length, padding="max_length"), batched=True, )
    elif len(features_name) == 2:
        for k in files.keys():
            transformed_ds[k] = ds[k].map(
                lambda example: tokenizer.batch_encode_plus(
                    (example[features_name[0]], example[features_name[1]]), truncation=True, max_length=max_seq_length, padding="max_length", ), batched=True, )

    def gen_train():
        for ex in transformed_ds[datasets.Split.TRAIN]:
            d = {k: v for k, v in ex.items() if k in input_names}
            label = label2id[ex[label_name]]
            yield (d, label)

    def gen_val():
        for ex in transformed_ds[datasets.Split.VALIDATION]:
            d = {k: v for k, v in ex.items() if k in input_names}
            label = label2id[ex[label_name]]
            yield (d, label)

    def gen_test():
        for ex in transformed_ds[datasets.Split.TEST]:
            d = {k: v for k, v in ex.items() if k in input_names}
            label = label2id[ex[label_name]]
            yield (d, label)

    train_ds = (
        tf.data.Dataset.from_generator(
            gen_train, ({k: tf.int32 for k in input_names}, tf.int64), ({k: tf.TensorShape([None]) for k in input_names}, tf.TensorShape([])), )
        if datasets.Split.TRAIN in transformed_ds
        else None
    )
    if train_ds is not None:
        train_ds = train_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.TRAIN])))
    val_ds = (
        tf.data.Dataset.from_generator(
            gen_val, ({k: tf.int32 for k in input_names}, tf.int64), ({k: tf.TensorShape([None]) for k in input_names}, tf.TensorShape([])), )
        if datasets.Split.VALIDATION in transformed_ds
        else None
    )
    if val_ds is not None:
        val_ds = val_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.VALIDATION])))
    test_ds = (
        tf.data.Dataset.from_generator(
            gen_test, ({k: tf.int32 for k in input_names}, tf.int64), ({k: tf.TensorShape([None]) for k in input_names}, tf.TensorShape([])), )
        if datasets.Split.TEST in transformed_ds
        else None
    )
    if test_ds is not None:
        test_ds = test_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.TEST])))
    return train_ds, val_ds, test_ds, label2id
logger = logging.getLogger(__name__)
@dataclass
class DataTrainingArguments:
    label_column_id: int = field(metadata={"help": "Which column contains the label"})
    train_file: str = field(default=None, metadata={"help": "The path of the training file"})
    dev_file: Optional[str] = field(default=None, metadata={"help": "The path of the development file"})
    test_file: Optional[str] = field(default=None, metadata={"help": "The path of the test file"})
    max_seq_length: int = field(
        default=128, metadata={
            "help": (
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        }, )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached training and evaluation sets"})


@dataclass
class ModelArguments:
    model_name_or_path: str = field(
        metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"})
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"})
    tokenizer_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"})
    use_fast: bool = field(default=False, metadata={"help": "Set this flag to use fast tokenization."})
    # If you want to tweak more attributes on your tokenizer, you should do it in a distinct script,
    # or just modify its tokenizer_config.json.
    cache_dir: Optional[str] = field(
        default=None, metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"}, )
def main():
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TFTrainingArguments))
    model_args, data_args, training_args = parser.parse_args_into_dataclasses()
    if (
        os.path.exists(training_args.output_dir)
        and os.listdir(training_args.output_dir)
        and training_args.do_train
        and not training_args.overwrite_output_dir
    ):
        raise ValueError(
            f"Output directory ({training_args.output_dir}) already exists and is not empty. Use"
            " --overwrite_output_dir to overcome.")
    # Setup logging
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", datefmt="%m/%d/%Y %H:%M:%S", level=logging.INFO, )
    logger.info(
        f"n_replicas: {training_args.n_replicas}, distributed training: {bool(training_args.n_replicas > 1)}, "
        f"16-bits training: {training_args.fp16}")
    logger.info(f"Training/evaluation parameters {training_args}")
    # Load pretrained model and tokenizer
    #
    # Distributed training:
    # The .from_pretrained methods guarantee that only one local process can concurrently
    # download model & vocab.
    tokenizer = AutoTokenizer.from_pretrained(
        model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path, cache_dir=model_args.cache_dir, )
    train_dataset, eval_dataset, test_ds, label2id = get_tfds(
        train_file=data_args.train_file, eval_file=data_args.dev_file, test_file=data_args.test_file, tokenizer=tokenizer, label_column_id=data_args.label_column_id, max_seq_length=data_args.max_seq_length, )
    config = AutoConfig.from_pretrained(
        model_args.config_name if model_args.config_name else model_args.model_name_or_path, num_labels=len(label2id), label2id=label2id, id2label={id: label for label, id in label2id.items()}, finetuning_task="text-classification", cache_dir=model_args.cache_dir, )
    with training_args.strategy.scope():
        model = TFAutoModelForSequenceClassification.from_pretrained(
            model_args.model_name_or_path, from_pt=bool(".bin" in model_args.model_name_or_path), config=config, cache_dir=model_args.cache_dir, )

    def compute_metrics(p: EvalPrediction) -> Dict:
        preds = np.argmax(p.predictions, axis=1)
        return {"acc": (preds == p.label_ids).mean()}

    # Initialize our Trainer
    trainer = TFTrainer(
        model=model, args=training_args, train_dataset=train_dataset, eval_dataset=eval_dataset, compute_metrics=compute_metrics, )
    # Training
    if training_args.do_train:
        trainer.train()
        trainer.save_model()
        tokenizer.save_pretrained(training_args.output_dir)
    # Evaluation
    results = {}
    if training_args.do_eval:
        logger.info("*** Evaluate ***")
        result = trainer.evaluate()
        output_eval_file = os.path.join(training_args.output_dir, "eval_results.txt")
        with open(output_eval_file, "w") as writer:
            logger.info("***** Eval results *****")
            for key, value in result.items():
                logger.info(f"  {key} = {value}")
                writer.write(f"{key} = {value}\n")
        results.update(result)
    return results
if __name__ == "__main__":
main()
| 304
| 0
|
def solution(limit: int = 1_000_000) -> int:
    """Sum Euler's totient phi(n) for 2 <= n <= limit, using a prime sieve."""
    phi = [i - 1 for i in range(limit + 1)]
    for i in range(2, limit + 1):
        if phi[i] == i - 1:  # i is prime, so update all of its multiples
            for j in range(2 * i, limit + 1, i):
                phi[j] -= phi[j] // i
    return sum(phi[2 : limit + 1])
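# --- Added illustration, not in the original file: a brute-force totient for
# cross-checking the sieve on small limits (O(n) per call, so tiny inputs only).
def _phi_naive(n: int) -> int:
    from math import gcd
    return sum(1 for k in range(1, n + 1) if gcd(k, n) == 1)
# e.g. solution(10) == sum(_phi_naive(n) for n in range(2, 11)) == 31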
if __name__ == "__main__":
print(solution())
| 370
|
import inspect
import unittest
from math import floor
from transformers import CvtConfig
from transformers.file_utils import cached_property, is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import CvtForImageClassification, CvtModel
from transformers.models.cvt.modeling_cvt import CVT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class CvtConfigTester(ConfigTester):
    def create_and_test_config_common_properties(self):
        config = self.config_class(**self.inputs_dict)
        self.parent.assertTrue(hasattr(config, "embed_dim"))
        self.parent.assertTrue(hasattr(config, "num_heads"))
class CvtModelTester:
    def __init__(self, parent, batch_size=13, image_size=64, num_channels=3, embed_dim=[16, 48, 96], num_heads=[1, 3, 6], depth=[1, 2, 10], patch_sizes=[7, 3, 3], patch_stride=[4, 2, 2], patch_padding=[2, 1, 1], stride_kv=[2, 2, 2], cls_token=[False, False, True], attention_drop_rate=[0.0, 0.0, 0.0], initializer_range=0.02, layer_norm_eps=1e-12, is_training=True, use_labels=True, num_labels=2, ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_sizes = patch_sizes
        self.patch_stride = patch_stride
        self.patch_padding = patch_padding
        self.is_training = is_training
        self.use_labels = use_labels
        self.num_labels = num_labels
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.num_heads = num_heads
        self.stride_kv = stride_kv
        self.depth = depth
        self.cls_token = cls_token
        self.attention_drop_rate = attention_drop_rate
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
def _A (self ):
__lowercase= floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
__lowercase= None
if self.use_labels:
__lowercase= ids_tensor([self.batch_size] , self.num_labels )
__lowercase= self.get_config()
return config, pixel_values, labels
def _A (self ):
return CvtConfig(
image_size=self.image_size , num_labels=self.num_labels , num_channels=self.num_channels , embed_dim=self.embed_dim , num_heads=self.num_heads , patch_sizes=self.patch_sizes , patch_padding=self.patch_padding , patch_stride=self.patch_stride , stride_kv=self.stride_kv , depth=self.depth , cls_token=self.cls_token , attention_drop_rate=self.attention_drop_rate , initializer_range=self.initializer_range , )
def _A (self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase ):
__lowercase= CvtModel(config=lowerCAmelCase )
model.to(lowerCAmelCase )
model.eval()
__lowercase= model(lowerCAmelCase )
__lowercase= (self.image_size, self.image_size)
__lowercase, __lowercase= image_size[0], image_size[1]
for i in range(len(self.depth ) ):
__lowercase= floor(((height + 2 * self.patch_padding[i] - self.patch_sizes[i]) / self.patch_stride[i]) + 1 )
__lowercase= floor(((width + 2 * self.patch_padding[i] - self.patch_sizes[i]) / self.patch_stride[i]) + 1 )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.embed_dim[-1], height, width) )
def _A (self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase ):
__lowercase= self.num_labels
__lowercase= CvtForImageClassification(lowerCAmelCase )
model.to(lowerCAmelCase )
model.eval()
__lowercase= model(lowerCAmelCase , labels=lowerCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def _A (self ):
__lowercase= self.prepare_config_and_inputs()
__lowercase, __lowercase, __lowercase= config_and_inputs
__lowercase= {'pixel_values': pixel_values}
return config, inputs_dict
@require_torch
class A ( A_ , A_ , unittest.TestCase ):
UpperCamelCase_ : Optional[int] =(CvtModel, CvtForImageClassification) if is_torch_available() else ()
UpperCamelCase_ : List[str] =(
{'''feature-extraction''': CvtModel, '''image-classification''': CvtForImageClassification}
if is_torch_available()
else {}
)
UpperCamelCase_ : str =False
UpperCamelCase_ : List[Any] =False
UpperCamelCase_ : Any =False
UpperCamelCase_ : Union[str, Any] =False
UpperCamelCase_ : Tuple =False
    def setUp(self):
        self.model_tester = CvtModelTester(self)
        self.config_tester = ConfigTester(self, config_class=CvtConfig, has_text_modality=False, hidden_size=37)
def _A (self ):
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def _A (self ):
return
@unittest.skip(reason='Cvt does not output attentions' )
def _A (self ):
pass
@unittest.skip(reason='Cvt does not use inputs_embeds' )
def _A (self ):
pass
@unittest.skip(reason='Cvt does not support input and output embeddings' )
def _A (self ):
pass
def _A (self ):
__lowercase, __lowercase= self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__lowercase= model_class(lowerCAmelCase )
__lowercase= inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__lowercase= [*signature.parameters.keys()]
__lowercase= ['pixel_values']
self.assertListEqual(arg_names[:1] , lowerCAmelCase )
def _A (self ):
__lowercase= self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowerCAmelCase )
def _A (self ):
def check_hidden_states_output(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase ):
__lowercase= model_class(lowerCAmelCase )
model.to(lowerCAmelCase )
model.eval()
with torch.no_grad():
__lowercase= model(**self._prepare_for_class(lowerCAmelCase , lowerCAmelCase ) )
__lowercase= outputs.hidden_states
__lowercase= len(self.model_tester.depth )
self.assertEqual(len(lowerCAmelCase ) , lowerCAmelCase )
# verify the first hidden states (first block)
self.assertListEqual(
list(hidden_states[0].shape[-3:] ) , [
self.model_tester.embed_dim[0],
self.model_tester.image_size // 4,
self.model_tester.image_size // 4,
] , )
__lowercase, __lowercase= self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__lowercase= True
check_hidden_states_output(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
__lowercase= True
check_hidden_states_output(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase )
def _A (self ):
__lowercase= self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*lowerCAmelCase )
@unittest.skip('Will be fixed soon by reducing the size of the model used for common tests.' )
def _A (self ):
pass
@slow
def _A (self ):
for model_name in CVT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__lowercase= CvtModel.from_pretrained(lowerCAmelCase )
self.assertIsNotNone(lowerCAmelCase )
def prepare_img():
    """Load the standard COCO test fixture image used across vision tests."""
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_torch
@require_vision
class A ( unittest.TestCase ):
@cached_property
def _A (self ):
return AutoImageProcessor.from_pretrained(CVT_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
@slow
def _A (self ):
__lowercase= CvtForImageClassification.from_pretrained(CVT_PRETRAINED_MODEL_ARCHIVE_LIST[0] ).to(lowerCAmelCase )
__lowercase= self.default_image_processor
__lowercase= prepare_img()
__lowercase= image_processor(images=lowerCAmelCase , return_tensors='pt' ).to(lowerCAmelCase )
# forward pass
with torch.no_grad():
__lowercase= model(**lowerCAmelCase )
# verify the logits
__lowercase= torch.Size((1, 1_0_0_0) )
self.assertEqual(outputs.logits.shape , lowerCAmelCase )
__lowercase= torch.tensor([0.92_85, 0.90_15, -0.31_50] ).to(lowerCAmelCase )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , lowerCAmelCase , atol=1E-4 ) )
| 304
| 0
|
class EditDistance:
    """Minimum edit (Levenshtein) distance via top-down and bottom-up dynamic programming."""

    def __init__(self):
        self.word1 = ""
        self.word2 = ""
        self.dp = []

    def __min_dist_top_down_dp(self, m, n):
        if m == -1:
            return n + 1
        elif n == -1:
            return m + 1
        elif self.dp[m][n] > -1:
            return self.dp[m][n]
        else:
            if self.word1[m] == self.word2[n]:
                self.dp[m][n] = self.__min_dist_top_down_dp(m - 1, n - 1)
            else:
                insert = self.__min_dist_top_down_dp(m, n - 1)
                delete = self.__min_dist_top_down_dp(m - 1, n)
                replace = self.__min_dist_top_down_dp(m - 1, n - 1)
                self.dp[m][n] = 1 + min(insert, delete, replace)
            return self.dp[m][n]

    def min_dist_top_down(self, word1: str, word2: str) -> int:
        self.word1 = word1
        self.word2 = word2
        self.dp = [[-1 for _ in range(len(word2))] for _ in range(len(word1))]
        return self.__min_dist_top_down_dp(len(word1) - 1, len(word2) - 1)

    def min_dist_bottom_up(self, word1: str, word2: str) -> int:
        self.word1 = word1
        self.word2 = word2
        m = len(word1)
        n = len(word2)
        self.dp = [[0 for _ in range(n + 1)] for _ in range(m + 1)]
        for i in range(m + 1):
            for j in range(n + 1):
                if i == 0:  # first string is empty
                    self.dp[i][j] = j
                elif j == 0:  # second string is empty
                    self.dp[i][j] = i
                elif word1[i - 1] == word2[j - 1]:  # last characters are equal
                    self.dp[i][j] = self.dp[i - 1][j - 1]
                else:
                    insert = self.dp[i][j - 1]
                    delete = self.dp[i - 1][j]
                    replace = self.dp[i - 1][j - 1]
                    self.dp[i][j] = 1 + min(insert, delete, replace)
        return self.dp[m][n]
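# --- Added illustration, not in the original file: a quick self-check of both
# implementations against a classic example.
def _demo_edit_distance():
    solver = EditDistance()
    assert solver.min_dist_top_down("intention", "execution") == 5
    assert solver.min_dist_bottom_up("intention", "execution") == 5
    assert solver.min_dist_bottom_up("", "abc") == 3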
if __name__ == "__main__":
lowerCAmelCase = EditDistance()
print('''****************** Testing Edit Distance DP Algorithm ******************''')
print()
lowerCAmelCase = input('''Enter the first string: ''').strip()
lowerCAmelCase = input('''Enter the second string: ''').strip()
print()
print(F'The minimum edit distance is: {solver.min_dist_top_down(Sa, Sa)}')
print(F'The minimum edit distance is: {solver.min_dist_bottom_up(Sa, Sa)}')
print()
print('''*************** End of Testing Edit Distance DP Algorithm ***************''')
| 371
|
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {"configuration_mra": ["MRA_PRETRAINED_CONFIG_ARCHIVE_MAP", "MraConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_mra"] = [
        "MRA_PRETRAINED_MODEL_ARCHIVE_LIST",
        "MraForMaskedLM",
        "MraForMultipleChoice",
        "MraForQuestionAnswering",
        "MraForSequenceClassification",
        "MraForTokenClassification",
        "MraLayer",
        "MraModel",
        "MraPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_mra import MRA_PRETRAINED_CONFIG_ARCHIVE_MAP, MraConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mra import (
MRA_PRETRAINED_MODEL_ARCHIVE_LIST,
MraForMaskedLM,
MraForMultipleChoice,
MraForQuestionAnswering,
MraForSequenceClassification,
MraForTokenClassification,
MraLayer,
MraModel,
MraPreTrainedModel,
)
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
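# --- Added note, not in the original file: with the lazy module installed in
# sys.modules, importing the package is cheap and torch-backed symbols are
# only resolved on first attribute access, e.g.:
#
#   from transformers.models.mra import MraConfig  # config only, no torch yet
#   from transformers.models.mra import MraModel   # triggers the modeling_mra import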
| 304
| 0
|
import argparse
import json
from pathlib import Path
import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import AutoImageProcessor, SwinvaConfig, SwinvaForImageClassification
def get_swinva_config(swinva_name):
    """Build a SwinvaConfig from a timm model name such as swinv2_tiny_patch4_window8_256."""
    config = SwinvaConfig()
    name_split = swinva_name.split("_")
    model_size = name_split[1]
    if "to" in name_split[3]:
        img_size = int(name_split[3][-3:])
    else:
        img_size = int(name_split[3])
    if "to" in name_split[2]:
        window_size = int(name_split[2][-2:])
    else:
        window_size = int(name_split[2][6:])
    if model_size == "tiny":
        embed_dim = 96
        depths = (2, 2, 6, 2)
        num_heads = (3, 6, 12, 24)
    elif model_size == "small":
        embed_dim = 96
        depths = (2, 2, 18, 2)
        num_heads = (3, 6, 12, 24)
    elif model_size == "base":
        embed_dim = 128
        depths = (2, 2, 18, 2)
        num_heads = (4, 8, 16, 32)
    else:
        embed_dim = 192
        depths = (2, 2, 18, 2)
        num_heads = (6, 12, 24, 48)
    if "to" in swinva_name:
        config.pretrained_window_sizes = (12, 12, 12, 6)
    if ("22k" in swinva_name) and ("to" not in swinva_name):
        num_classes = 21841
        repo_id = "huggingface/label-files"
        filename = "imagenet-22k-id2label.json"
        id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
        id2label = {int(k): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}
    else:
        num_classes = 1000
        repo_id = "huggingface/label-files"
        filename = "imagenet-1k-id2label.json"
        id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
        id2label = {int(k): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}
    config.image_size = img_size
    config.num_labels = num_classes
    config.embed_dim = embed_dim
    config.depths = depths
    config.num_heads = num_heads
    config.window_size = window_size
    return config
def rename_key(name):
    """Map a timm parameter name onto the corresponding transformers name."""
    if "patch_embed.proj" in name:
        name = name.replace("patch_embed.proj", "embeddings.patch_embeddings.projection")
    if "patch_embed.norm" in name:
        name = name.replace("patch_embed.norm", "embeddings.norm")
    if "layers" in name:
        name = "encoder." + name
    if "attn.proj" in name:
        name = name.replace("attn.proj", "attention.output.dense")
    if "attn" in name:
        name = name.replace("attn", "attention.self")
    if "norm1" in name:
        name = name.replace("norm1", "layernorm_before")
    if "norm2" in name:
        name = name.replace("norm2", "layernorm_after")
    if "mlp.fc1" in name:
        name = name.replace("mlp.fc1", "intermediate.dense")
    if "mlp.fc2" in name:
        name = name.replace("mlp.fc2", "output.dense")
    if "q_bias" in name:
        name = name.replace("q_bias", "query.bias")
    if "k_bias" in name:
        name = name.replace("k_bias", "key.bias")
    if "v_bias" in name:
        name = name.replace("v_bias", "value.bias")
    if "cpb_mlp" in name:
        name = name.replace("cpb_mlp", "continuous_position_bias_mlp")
    if name == "norm.weight":
        name = "layernorm.weight"
    if name == "norm.bias":
        name = "layernorm.bias"
    if "head" in name:
        name = name.replace("head", "classifier")
    else:
        name = "swinv2." + name
    return name
def convert_state_dict(orig_state_dict, model):
    """Rename timm state-dict keys and split fused qkv weights into query/key/value."""
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)
        if "mask" in key:
            continue
        elif "qkv" in key:
            key_split = key.split(".")
            layer_num = int(key_split[1])
            block_num = int(key_split[3])
            dim = model.swinva.encoder.layers[layer_num].blocks[block_num].attention.self.all_head_size
            prefix = f"swinv2.encoder.layers.{layer_num}.blocks.{block_num}.attention.self"
            if "weight" in key:
                orig_state_dict[f"{prefix}.query.weight"] = val[:dim, :]
                orig_state_dict[f"{prefix}.key.weight"] = val[dim : dim * 2, :]
                orig_state_dict[f"{prefix}.value.weight"] = val[-dim:, :]
            else:
                orig_state_dict[f"{prefix}.query.bias"] = val[:dim]
                orig_state_dict[f"{prefix}.key.bias"] = val[dim : dim * 2]
                orig_state_dict[f"{prefix}.value.bias"] = val[-dim:]
        else:
            orig_state_dict[rename_key(key)] = val
    return orig_state_dict
def convert_swinva_checkpoint(swinva_name, pytorch_dump_folder_path):
    """Convert a timm Swin V2 checkpoint, verify the logits, then save and push it."""
    timm_model = timm.create_model(swinva_name, pretrained=True)
    timm_model.eval()
    config = get_swinva_config(swinva_name)
    model = SwinvaForImageClassification(config)
    model.eval()
    new_state_dict = convert_state_dict(timm_model.state_dict(), model)
    model.load_state_dict(new_state_dict)
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    image_processor = AutoImageProcessor.from_pretrained("microsoft/{}".format(swinva_name.replace("_", "-")))
    image = Image.open(requests.get(url, stream=True).raw)
    inputs = image_processor(images=image, return_tensors="pt")
    timm_outs = timm_model(inputs["pixel_values"])
    hf_outs = model(**inputs).logits
    assert torch.allclose(timm_outs, hf_outs, atol=1e-3)
    print(f"Saving model {swinva_name} to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)
    model.push_to_hub(
        repo_path_or_name=Path(pytorch_dump_folder_path, swinva_name), organization="nandwalritik", commit_message="Add model", )
if __name__ == "__main__":
lowerCAmelCase = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--swinv2_name''',
default='''swinv2_tiny_patch4_window8_256''',
type=str,
help='''Name of the Swinv2 timm model you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.'''
)
lowerCAmelCase = parser.parse_args()
convert_swinva_checkpoint(args.swinva_name, args.pytorch_dump_folder_path)
| 350
|
import os
import re
import shutil
from argparse import ArgumentParser, Namespace
from datasets.commands import BaseDatasetsCLICommand
from datasets.utils.logging import get_logger
HIGHLIGHT_MESSAGE_PRE = "<<<<<<< This should probably be modified because it mentions: "
HIGHLIGHT_MESSAGE_POST = """=======
>>>>>>>
"""
TO_HIGHLIGHT = [
'''TextEncoderConfig''',
'''ByteTextEncoder''',
'''SubwordTextEncoder''',
'''encoder_config''',
'''maybe_build_from_corpus''',
'''manual_dir''',
]
TO_CONVERT = [
# (pattern, replacement)
# Order is important here for some replacements
(R'''tfds\.core''', R'''datasets'''),
(R'''tf\.io\.gfile\.GFile''', R'''open'''),
(R'''tf\.([\w\d]+)''', R'''datasets.Value(\'\1\')'''),
(R'''tfds\.features\.Text\(\)''', R'''datasets.Value(\'string\')'''),
(R'''tfds\.features\.Text\(''', R'''datasets.Value(\'string\'),'''),
(R'''features\s*=\s*tfds.features.FeaturesDict\(''', R'''features=datasets.Features('''),
(R'''tfds\.features\.FeaturesDict\(''', R'''dict('''),
(R'''The TensorFlow Datasets Authors''', R'''The TensorFlow Datasets Authors and the HuggingFace Datasets Authors'''),
(R'''tfds\.''', R'''datasets.'''),
(R'''dl_manager\.manual_dir''', R'''self.config.data_dir'''),
(R'''self\.builder_config''', R'''self.config'''),
]
def convert_command_factory(args: Namespace):
    """Factory used by the datasets CLI to build the command from parsed arguments."""
    return ConvertCommand(args.tfds_path, args.datasets_directory)


class ConvertCommand(BaseDatasetsCLICommand):
    @staticmethod
    def register_subcommand(parser: ArgumentParser):
        train_parser = parser.add_parser(
            "convert", help="Convert a TensorFlow Datasets dataset to a HuggingFace Datasets dataset.", )
        train_parser.add_argument(
            "--tfds_path", type=str, required=True, help="Path to a TensorFlow Datasets folder to convert or a single tfds file to convert.", )
        train_parser.add_argument(
            "--datasets_directory", type=str, required=True, help="Path to the HuggingFace Datasets folder.")
        train_parser.set_defaults(func=convert_command_factory)

    def __init__(self, tfds_path: str, datasets_directory: str, *args):
        self._logger = get_logger("datasets-cli/converting")
        self._tfds_path = tfds_path
        self._datasets_directory = datasets_directory
    def run(self):
        if os.path.isdir(self._tfds_path):
            abs_tfds_path = os.path.abspath(self._tfds_path)
        elif os.path.isfile(self._tfds_path):
            abs_tfds_path = os.path.dirname(self._tfds_path)
        else:
            raise ValueError("--tfds_path is neither a directory nor a file. Please check path.")
        abs_datasets_path = os.path.abspath(self._datasets_directory)
        self._logger.info(f"Converting datasets from {abs_tfds_path} to {abs_datasets_path}")
        utils_files = []
        with_manual_update = []
        imports_to_builder_map = {}
        if os.path.isdir(self._tfds_path):
            file_names = os.listdir(abs_tfds_path)
        else:
            file_names = [os.path.basename(self._tfds_path)]
        for f_name in file_names:
            self._logger.info(f"Looking at file {f_name}")
            input_file = os.path.join(abs_tfds_path, f_name)
            output_file = os.path.join(abs_datasets_path, f_name)
            if not os.path.isfile(input_file) or "__init__" in f_name or "_test" in f_name or ".py" not in f_name:
                self._logger.info("Skipping file")
                continue
            with open(input_file, encoding="utf-8") as f:
                lines = f.readlines()
            out_lines = []
            is_builder = False
            needs_manual_update = False
            tfds_imports = []
            for line in lines:
                out_line = line
                # Convert imports
                if "import tensorflow.compat.v2 as tf" in out_line:
                    continue
                elif "@tfds.core" in out_line:
                    continue
                elif "builder=self" in out_line:
                    continue
                elif "import tensorflow_datasets.public_api as tfds" in out_line:
                    out_line = "import datasets\n"
                elif "import tensorflow" in out_line:
                    # order is important here
                    out_line = ""
                    continue
                elif "from absl import logging" in out_line:
                    out_line = "from datasets import logging\n"
                elif "getLogger" in out_line:
                    out_line = out_line.replace("getLogger", "get_logger")
                elif any(expression in out_line for expression in TO_HIGHLIGHT):
                    needs_manual_update = True
                    to_highlight = list(filter(lambda e: e in out_line, TO_HIGHLIGHT))
                    out_lines.append(HIGHLIGHT_MESSAGE_PRE + str(to_highlight) + "\n")
                    out_lines.append(out_line)
                    out_lines.append(HIGHLIGHT_MESSAGE_POST)
                    continue
                else:
                    for pattern, replacement in TO_CONVERT:
                        out_line = re.sub(pattern, replacement, out_line)
                # Take care of saving utilities (to later move them together with main script)
                if "tensorflow_datasets" in out_line:
                    match = re.match(r"from\stensorflow_datasets.*import\s([^\.\r\n]+)", out_line)
                    tfds_imports.extend(imp.strip() for imp in match.group(1).split(","))
                    out_line = "from . import " + match.group(1)
                # Check we have not forget anything
                if "tf." in out_line or "tfds." in out_line or "tensorflow_datasets" in out_line:
                    raise ValueError(f"Error converting {out_line.strip()}")
                if "GeneratorBasedBuilder" in out_line or "BeamBasedBuilder" in out_line:
                    is_builder = True
                out_lines.append(out_line)
            if is_builder or "wmt" in f_name:
                # We create a new directory for each dataset
                dir_name = f_name.replace(".py", "")
                output_dir = os.path.join(abs_datasets_path, dir_name)
                output_file = os.path.join(output_dir, f_name)
                os.makedirs(output_dir, exist_ok=True)
                self._logger.info(f"Adding directory {output_dir}")
                imports_to_builder_map.update({imp: output_dir for imp in tfds_imports})
            else:
                # Utilities will be moved at the end
                utils_files.append(output_file)
            if needs_manual_update:
                with_manual_update.append(output_file)
            with open(output_file, "w", encoding="utf-8") as f:
                f.writelines(out_lines)
            self._logger.info(f"Converted in {output_file}")
        for utils_file in utils_files:
            try:
                f_name = os.path.basename(utils_file)
                dest_folder = imports_to_builder_map[f_name.replace(".py", "")]
                self._logger.info(f"Moving {dest_folder} to {utils_file}")
                shutil.copy(utils_file, dest_folder)
            except KeyError:
                self._logger.error(f"Cannot find destination folder for {utils_file}. Please copy manually.")
        if with_manual_update:
            for file_path in with_manual_update:
                self._logger.warning(
                    f"You need to manually update file {file_path} to remove configurations using 'TextEncoderConfig'.")
| 304
| 0
|
import logging
import random
import ray
from transformers import RagConfig, RagRetriever, RagTokenizer
from transformers.models.rag.retrieval_rag import CustomHFIndex
logger = logging.getLogger(__name__)


class RayRetriever:
    def __init__(self):
        self.initialized = False

    def create_rag_retriever(self, config, question_encoder_tokenizer, generator_tokenizer, index):
        if not self.initialized:
            self.retriever = RagRetriever(
                config, question_encoder_tokenizer=question_encoder_tokenizer, generator_tokenizer=generator_tokenizer, index=index, init_retrieval=False, )
            self.initialized = True

    def init_retrieval(self):
        self.retriever.index.init_index()

    def retrieve(self, question_hidden_states, n_docs):
        doc_ids, retrieved_doc_embeds = self.retriever._main_retrieve(question_hidden_states, n_docs)
        return doc_ids, retrieved_doc_embeds


class RagRayDistributedRetriever(RagRetriever):
    def __init__(self, config, question_encoder_tokenizer, generator_tokenizer, retrieval_workers, index=None):
        if index is not None and index.is_initialized() and len(retrieval_workers) > 0:
            raise ValueError(
                "When using Ray for distributed fine-tuning, "
                "you'll need to provide the paths instead, "
                "as the dataset and the index are loaded "
                "separately. More info in examples/rag/use_own_knowledge_dataset.py ")
        super().__init__(
            config, question_encoder_tokenizer=question_encoder_tokenizer, generator_tokenizer=generator_tokenizer, index=index, init_retrieval=False, )
        self.retrieval_workers = retrieval_workers
        if len(self.retrieval_workers) > 0:
            ray.get(
                [
                    worker.create_rag_retriever.remote(config, question_encoder_tokenizer, generator_tokenizer, index)
                    for worker in self.retrieval_workers
                ])

    def init_retrieval(self):
        logger.info("initializing retrieval")
        if len(self.retrieval_workers) > 0:
            ray.get([worker.init_retrieval.remote() for worker in self.retrieval_workers])
        else:
            # Non-distributed training. Load index into this same process.
            self.index.init_index()

    def retrieve(self, question_hidden_states, n_docs):
        if len(self.retrieval_workers) > 0:
            # Select a random retrieval actor.
            random_worker = self.retrieval_workers[random.randint(0, len(self.retrieval_workers) - 1)]
            doc_ids, retrieved_doc_embeds = ray.get(random_worker.retrieve.remote(question_hidden_states, n_docs))
        else:
            doc_ids, retrieved_doc_embeds = self._main_retrieve(question_hidden_states, n_docs)
        return retrieved_doc_embeds, doc_ids, self.index.get_doc_dicts(doc_ids)

    @classmethod
    def get_tokenizers(cls, retriever_name_or_path, indexed_dataset=None, **kwargs):
        return super(RagRayDistributedRetriever, cls).get_tokenizers(retriever_name_or_path, indexed_dataset, **kwargs)

    @classmethod
    def from_pretrained(cls, retriever_name_or_path, actor_handles, indexed_dataset=None, **kwargs):
        config = kwargs.pop("config", None) or RagConfig.from_pretrained(retriever_name_or_path, **kwargs)
        rag_tokenizer = RagTokenizer.from_pretrained(retriever_name_or_path, config=config)
        question_encoder_tokenizer = rag_tokenizer.question_encoder
        generator_tokenizer = rag_tokenizer.generator
        if indexed_dataset is not None:
            config.index_name = "custom"
            index = CustomHFIndex(config.retrieval_vector_size, indexed_dataset)
        else:
            index = cls._build_index(config)
        return cls(
            config, question_encoder_tokenizer=question_encoder_tokenizer, generator_tokenizer=generator_tokenizer, retrieval_workers=actor_handles, index=index, )
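# --- Added illustration, hypothetical values: wiring up remote retrieval
# workers with Ray before building the distributed retriever.
#
#   ray.init()
#   workers = [ray.remote(RayRetriever).remote() for _ in range(2)]
#   retriever = RagRayDistributedRetriever.from_pretrained(
#       "facebook/rag-token-nq", actor_handles=workers
#   )
#   retriever.init_retrieval()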
| 351
|
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
ALBERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'''albert-base-v1''': '''https://huggingface.co/albert-base-v1/resolve/main/config.json''',
'''albert-large-v1''': '''https://huggingface.co/albert-large-v1/resolve/main/config.json''',
'''albert-xlarge-v1''': '''https://huggingface.co/albert-xlarge-v1/resolve/main/config.json''',
'''albert-xxlarge-v1''': '''https://huggingface.co/albert-xxlarge-v1/resolve/main/config.json''',
'''albert-base-v2''': '''https://huggingface.co/albert-base-v2/resolve/main/config.json''',
'''albert-large-v2''': '''https://huggingface.co/albert-large-v2/resolve/main/config.json''',
'''albert-xlarge-v2''': '''https://huggingface.co/albert-xlarge-v2/resolve/main/config.json''',
'''albert-xxlarge-v2''': '''https://huggingface.co/albert-xxlarge-v2/resolve/main/config.json''',
}
class AlbertConfig(PretrainedConfig):
    model_type = "albert"

    def __init__(self, vocab_size=30_000, embedding_size=128, hidden_size=4096, num_hidden_layers=12, num_hidden_groups=1, num_attention_heads=64, intermediate_size=16_384, inner_group_num=1, hidden_act="gelu_new", hidden_dropout_prob=0, attention_probs_dropout_prob=0, max_position_embeddings=512, type_vocab_size=2, initializer_range=0.02, layer_norm_eps=1e-12, classifier_dropout_prob=0.1, position_embedding_type="absolute", pad_token_id=0, bos_token_id=2, eos_token_id=3, **kwargs, ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.embedding_size = embedding_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_hidden_groups = num_hidden_groups
        self.num_attention_heads = num_attention_heads
        self.inner_group_num = inner_group_num
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.classifier_dropout_prob = classifier_dropout_prob
        self.position_embedding_type = position_embedding_type


class AlbertOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
                ("token_type_ids", dynamic_axis),
            ])
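# --- Added illustration, not in the original file: instantiating a smaller
# config (values hypothetical) and inspecting the ONNX input axes.
#
#   config = AlbertConfig(hidden_size=768, num_attention_heads=12, intermediate_size=3072)
#   onnx_config = AlbertOnnxConfig(config)
#   dict(onnx_config.inputs)
#   # {'input_ids': {0: 'batch', 1: 'sequence'}, 'attention_mask': ..., 'token_type_ids': ...}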
| 304
| 0
|
import random
from typing import Any
def fisher_yates_shuffle(data: list) -> list[Any]:
    """Shuffle the list in place with random pairwise swaps and return it."""
    for _ in range(len(data)):
        a = random.randint(0, len(data) - 1)
        b = random.randint(0, len(data) - 1)
        data[a], data[b] = data[b], data[a]
    return data
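# --- Added illustration, not in the original file: the textbook Fisher-Yates
# (Knuth) shuffle for comparison. Unlike the random-swap loop above, it draws
# each position exactly once, which yields a uniformly random permutation.
def knuth_fisher_yates_shuffle(data: list) -> list:
    for i in range(len(data) - 1, 0, -1):
        j = random.randint(0, i)
        data[i], data[j] = data[j], data[i]
    return data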
if __name__ == "__main__":
    integers = [0, 1, 2, 3, 4, 5, 6, 7]
    strings = ["python", "says", "hello", "!"]
print('''Fisher-Yates Shuffle:''')
print('''List''', integers, strings)
print('''FY Shuffle''', fisher_yates_shuffle(integers), fisher_yates_shuffle(strings))
| 352
|
import argparse
import re
import requests
import torch
# git clone https://github.com/salesforce/BLIP.git
from models.blip import blip_decoder
from models.blip_itm import blip_itm
from models.blip_vqa import blip_vqa
from PIL import Image
from torchvision import transforms
from torchvision.transforms.functional import InterpolationMode
from transformers import (
BertTokenizer,
BlipConfig,
BlipForConditionalGeneration,
BlipForImageTextRetrieval,
BlipForQuestionAnswering,
)
def load_demo_image(image_size, device):
    img_url = "https://storage.googleapis.com/sfr-vision-language-research/BLIP/demo.jpg"
    raw_image = Image.open(requests.get(img_url, stream=True).raw).convert("RGB")
    transform = transforms.Compose(
        [
            transforms.Resize((image_size, image_size), interpolation=InterpolationMode.BICUBIC),
            transforms.ToTensor(),
            transforms.Normalize((0.48145466, 0.4578275, 0.40821073), (0.26862954, 0.26130258, 0.27577711)),
        ])
    image = transform(raw_image).unsqueeze(0).to(device)
    return image
def rename_key(key):
    if "visual_encoder" in key:
        key = re.sub("visual_encoder*", "vision_model.encoder", key)
    if "blocks" in key:
        key = re.sub(r"blocks", "layers", key)
    if "attn" in key:
        key = re.sub(r"attn", "self_attn", key)
    if "norm1" in key:
        key = re.sub(r"norm1", "layer_norm1", key)
    if "norm2" in key:
        key = re.sub(r"norm2", "layer_norm2", key)
    if "encoder.norm" in key:
        key = re.sub(r"encoder.norm", "post_layernorm", key)
    if "encoder.patch_embed.proj" in key:
        key = re.sub(r"encoder.patch_embed.proj", "embeddings.patch_embedding", key)
    if "encoder.pos_embed" in key:
        key = re.sub(r"encoder.pos_embed", "embeddings.position_embedding", key)
    if "encoder.cls_token" in key:
        key = re.sub(r"encoder.cls_token", "embeddings.class_embedding", key)
    if "self_attn" in key:
        key = re.sub(r"self_attn.proj", "self_attn.projection", key)
    return key
@torch.no_grad()
def convert_blip_checkpoint(checkpoint_path, pytorch_dump_folder_path, config_path=None):
    """Convert released BLIP checkpoints into the transformers format.

    Note: checkpoint_path is accepted to match the CLI call below, but the
    released model URLs are hardcoded here."""
    if config_path is not None:
        config = BlipConfig.from_pretrained(config_path)
    else:
        config = BlipConfig(projection_dim=512, text_config={}, vision_config={})
    hf_model = BlipForConditionalGeneration(config).eval()
    model_url = "https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_base_capfilt_large.pth"
    pt_model = blip_decoder(pretrained=model_url, image_size=384, vit="base")
    pt_model = pt_model.eval()
    modified_state_dict = pt_model.state_dict()
    for key in modified_state_dict.copy():
        value = modified_state_dict.pop(key)
        renamed_key = rename_key(key)
        modified_state_dict[renamed_key] = value
    hf_model.load_state_dict(modified_state_dict)
    image_size = 384
    image = load_demo_image(image_size=image_size, device="cpu")
    tokenizer = BertTokenizer.from_pretrained("bert-base-uncased")
    input_ids = tokenizer(["a picture of"]).input_ids
    out = hf_model.generate(image, input_ids)
    assert out[0].tolist() == [30522, 1037, 3861, 1997, 1037, 2450, 3564, 2006, 1996, 3509, 2007, 2014, 3899, 102]
    out = hf_model.generate(image)
    assert out[0].tolist() == [30522, 1037, 2450, 3564, 2006, 1996, 3509, 2007, 2014, 3899, 102]
    if pytorch_dump_folder_path is not None:
        hf_model.save_pretrained(pytorch_dump_folder_path)
    # model_url = 'https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_vqa.pth'
    model_url = (
        "https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_base_vqa_capfilt_large.pth"
    )
    vqa_model = blip_vqa(pretrained=model_url, image_size=image_size, vit="base")
    vqa_model.eval()
    modified_state_dict = vqa_model.state_dict()
    for key in modified_state_dict.copy():
        value = modified_state_dict.pop(key)
        renamed_key = rename_key(key)
        modified_state_dict[renamed_key] = value
    hf_vqa_model = BlipForQuestionAnswering(config)
    hf_vqa_model.load_state_dict(modified_state_dict)
    question = ["How many dogs are in this image?"]
    question_input_ids = tokenizer(question, return_tensors="pt").input_ids
    answer = hf_vqa_model.generate(question_input_ids, image)
    print(tokenizer.decode(answer[0]))
    assert tokenizer.decode(answer[0]) == "[UNK] 1 [SEP]"
    if pytorch_dump_folder_path is not None:
        hf_vqa_model.save_pretrained(pytorch_dump_folder_path + "_vqa")
    model_url = "https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_base_retrieval_coco.pth"
    itm_model = blip_itm(pretrained=model_url, image_size=image_size, vit="base")
    itm_model.eval()
    modified_state_dict = itm_model.state_dict()
    for key in modified_state_dict.copy():
        value = modified_state_dict.pop(key)
        renamed_key = rename_key(key)
        modified_state_dict[renamed_key] = value
    hf_itm_model = BlipForImageTextRetrieval(config)
    question = ["A picture of a woman with a dog sitting in a beach"]
    question_input_ids = tokenizer(
        question, return_tensors="pt", padding="max_length", truncation=True, max_length=35, ).input_ids
    hf_itm_model.load_state_dict(modified_state_dict)
    hf_itm_model.eval()
    out_itm = hf_itm_model(question_input_ids, image, use_itm_head=True)
    out = hf_itm_model(question_input_ids, image, use_itm_head=False)
    assert out[0].item() == 0.21106874942779541
    assert torch.nn.functional.softmax(out_itm[0], dim=1)[:, 1].item() == 0.45698845386505127
    if pytorch_dump_folder_path is not None:
        hf_itm_model.save_pretrained(pytorch_dump_folder_path + "_itm")
if __name__ == "__main__":
lowerCAmelCase = argparse.ArgumentParser()
parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
parser.add_argument('''--config_path''', default=None, type=str, help='''Path to hf config.json of model to convert''')
parser.add_argument('''--checkpoint_path''', default=None, type=str, help='''Path to the original BLIP checkpoint.''')
lowerCAmelCase = parser.parse_args()
convert_blip_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path)
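# --- Illustrative sketch (not part of the original script) ---
# The conversion above repeats one pattern for each checkpoint: walk the
# source state dict, rename every key, and load the result into the target
# model. A self-contained version of that pattern is sketched below; the
# rename rule here is a made-up placeholder, not BLIP's real key mapping.
from collections import OrderedDict

def _rename(key):
    # hypothetical rule: strip a "module." prefix left over from DataParallel
    return key[len("module."):] if key.startswith("module.") else key

def remap_state_dict(state_dict):
    remapped = OrderedDict()
    for key in list(state_dict):  # snapshot keys before mutating the dict
        remapped[_rename(key)] = state_dict.pop(key)
    return remapped

assert remap_state_dict(OrderedDict([("module.w", 1), ("b", 2)])) == OrderedDict([("w", 1), ("b", 2)])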
| 304
| 0
|
import gc
import inspect
import unittest
import torch
from parameterized import parameterized
from diffusers import PriorTransformer
from diffusers.utils import floats_tensor, slow, torch_all_close, torch_device
from diffusers.utils.testing_utils import enable_full_determinism
from .test_modeling_common import ModelTesterMixin
enable_full_determinism()
class A ( snake_case_ , unittest.TestCase ):
UpperCamelCase_ : Optional[int] =PriorTransformer
UpperCamelCase_ : Any ='''hidden_states'''
@property
def _A (self ):
__lowercase= 4
__lowercase= 8
__lowercase= 7
__lowercase= floats_tensor((batch_size, embedding_dim) ).to(lowerCAmelCase )
__lowercase= floats_tensor((batch_size, embedding_dim) ).to(lowerCAmelCase )
__lowercase= floats_tensor((batch_size, num_embeddings, embedding_dim) ).to(lowerCAmelCase )
return {
"hidden_states": hidden_states,
"timestep": 2,
"proj_embedding": proj_embedding,
"encoder_hidden_states": encoder_hidden_states,
}
def _A (self , lowerCAmelCase=0 ):
torch.manual_seed(lowerCAmelCase )
__lowercase= 4
__lowercase= 8
__lowercase= 7
__lowercase= torch.randn((batch_size, embedding_dim) ).to(lowerCAmelCase )
__lowercase= torch.randn((batch_size, embedding_dim) ).to(lowerCAmelCase )
__lowercase= torch.randn((batch_size, num_embeddings, embedding_dim) ).to(lowerCAmelCase )
return {
"hidden_states": hidden_states,
"timestep": 2,
"proj_embedding": proj_embedding,
"encoder_hidden_states": encoder_hidden_states,
}
@property
def _A (self ):
return (4, 8)
@property
def _A (self ):
return (4, 8)
def _A (self ):
__lowercase= {
'num_attention_heads': 2,
'attention_head_dim': 4,
'num_layers': 2,
'embedding_dim': 8,
'num_embeddings': 7,
'additional_embeddings': 4,
}
__lowercase= self.dummy_input
return init_dict, inputs_dict
def _A (self ):
__lowercase, __lowercase= PriorTransformer.from_pretrained(
'hf-internal-testing/prior-dummy' , output_loading_info=lowerCAmelCase )
self.assertIsNotNone(lowerCAmelCase )
self.assertEqual(len(loading_info['missing_keys'] ) , 0 )
model.to(lowerCAmelCase )
__lowercase= model(**self.dummy_input )[0]
assert hidden_states is not None, "Make sure output is not None"
def _A (self ):
__lowercase, __lowercase= self.prepare_init_args_and_inputs_for_common()
__lowercase= self.model_class(**lowerCAmelCase )
__lowercase= inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__lowercase= [*signature.parameters.keys()]
__lowercase= ['hidden_states', 'timestep']
self.assertListEqual(arg_names[:2] , lowerCAmelCase )
def _A (self ):
__lowercase= PriorTransformer.from_pretrained('hf-internal-testing/prior-dummy' )
__lowercase= model.to(lowerCAmelCase )
if hasattr(lowerCAmelCase , 'set_default_attn_processor' ):
model.set_default_attn_processor()
__lowercase= self.get_dummy_seed_input()
with torch.no_grad():
__lowercase= model(**lowerCAmelCase )[0]
__lowercase= output[0, :5].flatten().cpu()
print(lowerCAmelCase )
# Since the VAE Gaussian prior's generator is seeded on the appropriate device,
# the expected output slices are not the same for CPU and GPU.
__lowercase= torch.tensor([-1.34_36, -0.28_70, 0.75_38, 0.43_68, -0.02_39] )
self.assertTrue(torch_all_close(lowerCAmelCase , lowerCAmelCase , rtol=1E-2 ) )
@slow
class A ( unittest.TestCase ):
def _A (self , lowerCAmelCase=1 , lowerCAmelCase=7_6_8 , lowerCAmelCase=7_7 , lowerCAmelCase=0 ):
torch.manual_seed(lowerCAmelCase )
__lowercase= batch_size
__lowercase= embedding_dim
__lowercase= num_embeddings
__lowercase= torch.randn((batch_size, embedding_dim) ).to(lowerCAmelCase )
__lowercase= torch.randn((batch_size, embedding_dim) ).to(lowerCAmelCase )
__lowercase= torch.randn((batch_size, num_embeddings, embedding_dim) ).to(lowerCAmelCase )
return {
"hidden_states": hidden_states,
"timestep": 2,
"proj_embedding": proj_embedding,
"encoder_hidden_states": encoder_hidden_states,
}
def _A (self ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
@parameterized.expand(
[
# fmt: off
[1_3, [-0.58_61, 0.12_83, -0.09_31, 0.08_82, 0.44_76, 0.13_29, -0.04_98, 0.06_40]],
[3_7, [-0.49_13, 0.01_10, -0.04_83, 0.05_41, 0.49_54, -0.01_70, 0.03_54, 0.16_51]],
# fmt: on
] )
def _A (self , lowerCAmelCase , lowerCAmelCase ):
__lowercase= PriorTransformer.from_pretrained('kandinsky-community/kandinsky-2-1-prior' , subfolder='prior' )
model.to(lowerCAmelCase )
__lowercase= self.get_dummy_seed_input(seed=lowerCAmelCase )
with torch.no_grad():
__lowercase= model(**lowerCAmelCase )[0]
assert list(sample.shape ) == [1, 7_6_8]
__lowercase= sample[0, :8].flatten().cpu()
print(lowerCAmelCase )
__lowercase= torch.tensor(lowerCAmelCase )
assert torch_all_close(lowerCAmelCase , lowerCAmelCase , atol=1E-3 )
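# --- Illustrative sketch (not part of the original tests) ---
# The tests above seed torch's global RNG before drawing random inputs so
# that the hard-coded expected slices stay reproducible across runs. The
# pattern in isolation, assuming only a working torch install:
import torch

def seeded_input(seed, shape=(1, 4)):
    torch.manual_seed(seed)  # pin the global RNG state
    return torch.randn(shape)

assert torch.equal(seeded_input(13), seeded_input(13))       # same seed, same tensor
assert not torch.equal(seeded_input(13), seeded_input(37))   # new seed, new tensor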
| 353
|
from __future__ import annotations
from collections.abc import Iterable, Iterator
from dataclasses import dataclass
lowerCAmelCase = (3, 9, -1_1, 0, 7, 5, 1, -1)
lowerCAmelCase = (4, 6, 2, 0, 8, 1_0, 3, -2)
@dataclass
class A :
UpperCamelCase_ : int
UpperCamelCase_ : Node | None
class A :
def __init__(self , lowerCAmelCase ):
__lowercase= None
for i in sorted(lowerCAmelCase , reverse=True ):  # insert largest-first so head insertion yields an ascending list
__lowercase= Node(lowerCAmelCase , self.head )
def __iter__(self ):
__lowercase= self.head
while node:
yield node.data
__lowercase= node.next_node
def __len__(self ):
return sum(1 for _ in self )
def __str__(self ):
return " -> ".join([str(lowerCAmelCase ) for node in self] )
def _lowerCamelCase( lowercase__ , lowercase__ ) -> SortedLinkedList:
'''simple docstring'''
return SortedLinkedList(list(lowercase__ ) + list(lowercase__ ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
lowerCAmelCase = SortedLinkedList
print(merge_lists(SSL(test_data_odd), SSL(test_data_even)))
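# --- Illustrative sketch (not part of the original module) ---
# merge_lists above concatenates and re-sorts, which costs
# O((m + n) log(m + n)). Once each input is sorted, heapq.merge interleaves
# them in linear time and yields the same ascending sequence:
from heapq import merge

def merge_sorted(a, b):
    return list(merge(sorted(a), sorted(b)))  # sort each once, then an O(m + n) merge

print(merge_sorted((3, 9, -11, 0, 7, 5, 1, -1), (4, 6, 2, 0, 8, 10, 3, -2)))
# [-11, -2, -1, 0, 0, 1, 2, 3, 3, 4, 5, 6, 7, 8, 9, 10]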
| 304
| 0
|
import requests
lowerCAmelCase = '''''' # <-- Put your OpenWeatherMap appid here!
lowerCAmelCase = '''https://api.openweathermap.org/data/2.5/'''
def _lowerCamelCase( lowercase__ = "Chicago" , lowercase__ = APPID ) -> Dict:
'''simple docstring'''
return requests.get(URL_BASE + 'weather' , params=locals() ).json()
def _lowerCamelCase( lowercase__ = "Kolkata, India" , lowercase__ = APPID ) -> Optional[int]:
'''simple docstring'''
return requests.get(URL_BASE + 'forecast' , params=locals() ).json()
def _lowerCamelCase( lowercase__ = 55.68 , lowercase__ = 12.57 , lowercase__ = APPID ) -> str:
'''simple docstring'''
return requests.get(URL_BASE + 'onecall' , params=locals() ).json()
if __name__ == "__main__":
from pprint import pprint
while True:
lowerCAmelCase = input('''Enter a location:''').strip()
if location:
pprint(current_weather(location))
else:
break
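# --- Illustrative sketch (not part of the original module) ---
# The helpers above lean on params=locals() to turn keyword arguments into
# the query string. An explicit equivalent, reusing the module's URL_BASE
# and APPID names (it only returns real data once APPID holds a valid
# OpenWeatherMap key):
def current_weather_explicit(q="Chicago", appid=APPID):
    response = requests.get(URL_BASE + "weather", params={"q": q, "appid": appid})
    response.raise_for_status()  # fail loudly on HTTP errors
    return response.json()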
| 354
|
from __future__ import annotations
from collections.abc import Callable
def _lowerCamelCase( lowercase__ , lowercase__ , lowercase__ , lowercase__ = 1_0_0 , ) -> float:
'''simple docstring'''
__lowercase= x_start
__lowercase= fnc(lowercase__ )
__lowercase= 0.0
for _ in range(lowercase__ ):
# Approximates small segments of curve as linear and solve
# for trapezoidal area
__lowercase= (x_end - x_start) / steps + xa
__lowercase= fnc(lowercase__ )
area += abs(fxa + fxa ) * (xa - xa) / 2
# Increment step
__lowercase= xa
__lowercase= fxa
return area
if __name__ == "__main__":
def _lowerCamelCase( lowercase__ ) -> Dict:
'''simple docstring'''
return x**3 + x**2
print('''f(x) = x^3 + x^2''')
print('''The area between the curve, x = -5, x = 5 and the x axis is:''')
lowerCAmelCase = 1_0
while i <= 1_0_0_0_0_0:
print(F'with {i} steps: {trapezoidal_area(f, -5, 5, i)}')
i *= 1_0
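# --- Illustrative sketch (not part of the original module) ---
# The same composite trapezoidal rule with both endpoint samples named
# explicitly, computing the *signed* integral (no per-segment abs). For
# f(x) = x**3 + x**2 on [-5, 5] the exact signed integral is 250/3 = 83.33...,
# which the approximation approaches as steps grows.
def trapezoid(fnc, x_start, x_end, steps=100):
    h = (x_end - x_start) / steps
    x_left, f_left = x_start, fnc(x_start)
    area = 0.0
    for _ in range(steps):
        x_right = x_left + h
        f_right = fnc(x_right)
        area += (f_left + f_right) * h / 2  # one trapezoid over [x_left, x_right]
        x_left, f_left = x_right, f_right
    return area

print(trapezoid(lambda x: x**3 + x**2, -5, 5, 10_000))  # ≈ 83.3333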
| 304
| 0
|
from sympy import diff, lambdify, symbols
from sympy.functions import * # noqa: F403
def _lowerCamelCase( lowercase__ , lowercase__ , lowercase__ = "x" , lowercase__ = 1_0**-1_0 , lowercase__ = 1 , ) -> complex:
'''simple docstring'''
__lowercase= symbols(_A )
__lowercase= lambdify(_A , _A )
__lowercase= lambdify(_A , diff(_A , _A ) )
__lowercase= starting_point
while True:
if diff_function(_A ) != 0:
__lowercase= prev_guess - multiplicity * func(_A ) / diff_function(
_A )
else:
raise ZeroDivisionError('Could not find root' ) from None
# Precision is checked by comparing the difference of consecutive guesses
if abs(next_guess - prev_guess ) < precision:
return next_guess
__lowercase= next_guess
# Let's Execute
if __name__ == "__main__":
# Find root of trigonometric function
# Find value of pi
print(F'The root of sin(x) = 0 is {newton_raphson("sin(x)", 2)}')
# Find root of polynomial
# Find fourth Root of 5
print(F'The root of x**4 - 5 = 0 is {newton_raphson("x**4 -5", 0.4 +5J)}')
# Find value of e
print(
'''The root of log(y) - 1 = 0 is ''',
F'{newton_raphson("log(y) - 1", 2, variable="y")}',
)
# Exponential Roots
print(
'''The root of exp(x) - 1 = 0 is''',
F'{newton_raphson("exp(x) - 1", 1_0, precision=0.0_0_5)}',
)
# Find root of cos(x)
print(F'The root of cos(x) = 0 is {newton_raphson("cos(x)", 0)}')
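# --- Illustrative sketch (not part of the original module) ---
# The same Newton-Raphson update x_{n+1} = x_n - f(x_n) / f'(x_n) without
# sympy, using a hand-written derivative and the same stopping rule
# (consecutive guesses closer than `precision`):
def newton(f, f_prime, x0, precision=1e-10, max_iter=100):
    x = x0
    for _ in range(max_iter):
        slope = f_prime(x)
        if slope == 0:
            raise ZeroDivisionError("Could not find root")
        x_next = x - f(x) / slope
        if abs(x_next - x) < precision:
            return x_next
        x = x_next
    raise RuntimeError("did not converge")

print(newton(lambda x: x**2 - 2, lambda x: 2 * x, 1.0))  # sqrt(2) ≈ 1.4142135623...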
| 355
|
import unittest
from transformers import XLMConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
XLMForMultipleChoice,
XLMForQuestionAnswering,
XLMForQuestionAnsweringSimple,
XLMForSequenceClassification,
XLMForTokenClassification,
XLMModel,
XLMWithLMHeadModel,
)
from transformers.models.xlm.modeling_xlm import XLM_PRETRAINED_MODEL_ARCHIVE_LIST
class A :
def __init__(self , lowerCAmelCase , lowerCAmelCase=1_3 , lowerCAmelCase=7 , lowerCAmelCase=True , lowerCAmelCase=True , lowerCAmelCase=True , lowerCAmelCase=True , lowerCAmelCase=True , lowerCAmelCase=False , lowerCAmelCase=False , lowerCAmelCase=False , lowerCAmelCase=2 , lowerCAmelCase=9_9 , lowerCAmelCase=0 , lowerCAmelCase=3_2 , lowerCAmelCase=5 , lowerCAmelCase=4 , lowerCAmelCase=0.1 , lowerCAmelCase=0.1 , lowerCAmelCase=5_1_2 , lowerCAmelCase=2 , lowerCAmelCase=0.02 , lowerCAmelCase=2 , lowerCAmelCase=4 , lowerCAmelCase="last" , lowerCAmelCase=True , lowerCAmelCase=None , lowerCAmelCase=0 , ):
__lowercase= parent
__lowercase= batch_size
__lowercase= seq_length
__lowercase= is_training
__lowercase= use_input_lengths
__lowercase= use_token_type_ids
__lowercase= use_labels
__lowercase= gelu_activation
__lowercase= sinusoidal_embeddings
__lowercase= causal
__lowercase= asm
__lowercase= n_langs
__lowercase= vocab_size
__lowercase= n_special
__lowercase= hidden_size
__lowercase= num_hidden_layers
__lowercase= num_attention_heads
__lowercase= hidden_dropout_prob
__lowercase= attention_probs_dropout_prob
__lowercase= max_position_embeddings
__lowercase= type_sequence_label_size
__lowercase= initializer_range
__lowercase= num_labels
__lowercase= num_choices
__lowercase= summary_type
__lowercase= use_proj
__lowercase= scope
__lowercase= bos_token_id
def _A (self ):
__lowercase= ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
__lowercase= random_attention_mask([self.batch_size, self.seq_length] )
__lowercase= None
if self.use_input_lengths:
__lowercase= (
ids_tensor([self.batch_size] , vocab_size=2 ) + self.seq_length - 2
) # small variation of seq_length
__lowercase= None
if self.use_token_type_ids:
__lowercase= ids_tensor([self.batch_size, self.seq_length] , self.n_langs )
__lowercase= None
__lowercase= None
__lowercase= None
if self.use_labels:
__lowercase= ids_tensor([self.batch_size] , self.type_sequence_label_size )
__lowercase= ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
__lowercase= ids_tensor([self.batch_size] , 2 ).float()
__lowercase= ids_tensor([self.batch_size] , self.num_choices )
__lowercase= self.get_config()
return (
config,
input_ids,
token_type_ids,
input_lengths,
sequence_labels,
token_labels,
is_impossible_labels,
choice_labels,
input_mask,
)
def _A (self ):
return XLMConfig(
vocab_size=self.vocab_size , n_special=self.n_special , emb_dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , gelu_activation=self.gelu_activation , sinusoidal_embeddings=self.sinusoidal_embeddings , asm=self.asm , causal=self.causal , n_langs=self.n_langs , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , summary_type=self.summary_type , use_proj=self.use_proj , num_labels=self.num_labels , bos_token_id=self.bos_token_id , )
def _A (self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , ):
__lowercase= XLMModel(config=lowerCAmelCase )
model.to(lowerCAmelCase )
model.eval()
__lowercase= model(lowerCAmelCase , lengths=lowerCAmelCase , langs=lowerCAmelCase )
__lowercase= model(lowerCAmelCase , langs=lowerCAmelCase )
__lowercase= model(lowerCAmelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def _A (self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , ):
__lowercase= XLMWithLMHeadModel(lowerCAmelCase )
model.to(lowerCAmelCase )
model.eval()
__lowercase= model(lowerCAmelCase , token_type_ids=lowerCAmelCase , labels=lowerCAmelCase )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def _A (self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , ):
__lowercase= XLMForQuestionAnsweringSimple(lowerCAmelCase )
model.to(lowerCAmelCase )
model.eval()
__lowercase= model(lowerCAmelCase )
__lowercase= model(lowerCAmelCase , start_positions=lowerCAmelCase , end_positions=lowerCAmelCase )
__lowercase= outputs
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def _A (self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , ):
__lowercase= XLMForQuestionAnswering(lowerCAmelCase )
model.to(lowerCAmelCase )
model.eval()
__lowercase= model(lowerCAmelCase )
__lowercase= model(
lowerCAmelCase , start_positions=lowerCAmelCase , end_positions=lowerCAmelCase , cls_index=lowerCAmelCase , is_impossible=lowerCAmelCase , p_mask=lowerCAmelCase , )
__lowercase= model(
lowerCAmelCase , start_positions=lowerCAmelCase , end_positions=lowerCAmelCase , cls_index=lowerCAmelCase , is_impossible=lowerCAmelCase , )
((__lowercase), )= result_with_labels.to_tuple()
__lowercase= model(lowerCAmelCase , start_positions=lowerCAmelCase , end_positions=lowerCAmelCase )
((__lowercase), )= result_with_labels.to_tuple()
self.parent.assertEqual(result_with_labels.loss.shape , () )
self.parent.assertEqual(result.start_top_log_probs.shape , (self.batch_size, model.config.start_n_top) )
self.parent.assertEqual(result.start_top_index.shape , (self.batch_size, model.config.start_n_top) )
self.parent.assertEqual(
result.end_top_log_probs.shape , (self.batch_size, model.config.start_n_top * model.config.end_n_top) )
self.parent.assertEqual(
result.end_top_index.shape , (self.batch_size, model.config.start_n_top * model.config.end_n_top) )
self.parent.assertEqual(result.cls_logits.shape , (self.batch_size,) )
def _A (self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , ):
__lowercase= XLMForSequenceClassification(lowerCAmelCase )
model.to(lowerCAmelCase )
model.eval()
__lowercase= model(lowerCAmelCase )
__lowercase= model(lowerCAmelCase , labels=lowerCAmelCase )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def _A (self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , ):
__lowercase= self.num_labels
__lowercase= XLMForTokenClassification(lowerCAmelCase )
model.to(lowerCAmelCase )
model.eval()
__lowercase= model(lowerCAmelCase , attention_mask=lowerCAmelCase , labels=lowerCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def _A (self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , ):
__lowercase= self.num_choices
__lowercase= XLMForMultipleChoice(config=lowerCAmelCase )
model.to(lowerCAmelCase )
model.eval()
__lowercase= input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
__lowercase= token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
__lowercase= input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
__lowercase= model(
lowerCAmelCase , attention_mask=lowerCAmelCase , token_type_ids=lowerCAmelCase , labels=lowerCAmelCase , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def _A (self ):
__lowercase= self.prepare_config_and_inputs()
(
(
__lowercase
), (
__lowercase
), (
__lowercase
), (
__lowercase
), (
__lowercase
), (
__lowercase
), (
__lowercase
), (
__lowercase
), (
__lowercase
),
)= config_and_inputs
__lowercase= {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'lengths': input_lengths}
return config, inputs_dict
@require_torch
class A ( A_ , A_ , A_ , unittest.TestCase ):
UpperCamelCase_ : int =(
(
XLMModel,
XLMWithLMHeadModel,
XLMForQuestionAnswering,
XLMForSequenceClassification,
XLMForQuestionAnsweringSimple,
XLMForTokenClassification,
XLMForMultipleChoice,
)
if is_torch_available()
else ()
)
UpperCamelCase_ : Dict =(
(XLMWithLMHeadModel,) if is_torch_available() else ()
) # TODO (PVP): Check other models whether language generation is also applicable
UpperCamelCase_ : str =(
{
'''feature-extraction''': XLMModel,
'''fill-mask''': XLMWithLMHeadModel,
'''question-answering''': XLMForQuestionAnsweringSimple,
'''text-classification''': XLMForSequenceClassification,
'''text-generation''': XLMWithLMHeadModel,
'''token-classification''': XLMForTokenClassification,
'''zero-shot''': XLMForSequenceClassification,
}
if is_torch_available()
else {}
)
def _A (self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase ):
if (
pipeline_test_casse_name == "QAPipelineTests"
and tokenizer_name is not None
and not tokenizer_name.endswith('Fast' )
):
# `QAPipelineTests` fails for a few models when the slower tokenizer are used.
# (The slower tokenizers were never used for pipeline tests before the pipeline testing rework)
# TODO: check (and possibly fix) the `QAPipelineTests` with slower tokenizer
return True
return False
def _A (self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase=False ):
__lowercase= super()._prepare_for_class(lowerCAmelCase , lowerCAmelCase , return_labels=lowerCAmelCase )
if return_labels:
if model_class.__name__ == "XLMForQuestionAnswering":
__lowercase= torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=lowerCAmelCase )
__lowercase= torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=lowerCAmelCase )
return inputs_dict
def _A (self ):
__lowercase= XLMModelTester(self )
__lowercase= ConfigTester(self , config_class=lowerCAmelCase , emb_dim=3_7 )
def _A (self ):
self.config_tester.run_common_tests()
def _A (self ):
__lowercase= self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_model(*lowerCAmelCase )
def _A (self ):
__lowercase= self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_lm_head(*lowerCAmelCase )
def _A (self ):
__lowercase= self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_simple_qa(*lowerCAmelCase )
def _A (self ):
__lowercase= self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_qa(*lowerCAmelCase )
def _A (self ):
__lowercase= self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_sequence_classif(*lowerCAmelCase )
def _A (self ):
__lowercase= self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_token_classif(*lowerCAmelCase )
def _A (self ):
__lowercase= self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_for_multiple_choice(*lowerCAmelCase )
def _A (self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase=False , lowerCAmelCase=1 ):
self.assertIsInstance(lowerCAmelCase , lowerCAmelCase )
self.assertListEqual(
[isinstance(lowerCAmelCase , lowerCAmelCase ) for iter_attentions in attentions] , [True] * len(lowerCAmelCase ) )
self.assertEqual(len(lowerCAmelCase ) , (max_length - min_length) * num_beam_groups )
for idx, iter_attentions in enumerate(lowerCAmelCase ):
# adds PAD dummy token
__lowercase= min_length + idx + 1
__lowercase= min_length + idx + 1
__lowercase= (
batch_size * num_beam_groups,
config.num_attention_heads,
tgt_len,
src_len,
)
# check attn size
self.assertListEqual(
[layer_attention.shape for layer_attention in iter_attentions] , [expected_shape] * len(lowerCAmelCase ) )
def _A (self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase=False , lowerCAmelCase=1 ):
self.assertIsInstance(lowerCAmelCase , lowerCAmelCase )
self.assertListEqual(
[isinstance(lowerCAmelCase , lowerCAmelCase ) for iter_hidden_states in hidden_states] , [True] * len(lowerCAmelCase ) , )
self.assertEqual(len(lowerCAmelCase ) , (max_length - min_length) * num_beam_groups )
for idx, iter_hidden_states in enumerate(lowerCAmelCase ):
# adds PAD dummy token
__lowercase= min_length + idx + 1
__lowercase= (batch_size * num_beam_groups, seq_len, config.hidden_size)
# check hidden size
self.assertListEqual(
[layer_hidden_states.shape for layer_hidden_states in iter_hidden_states] , [expected_shape] * len(lowerCAmelCase ) , )
pass
@slow
def _A (self ):
for model_name in XLM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__lowercase= XLMModel.from_pretrained(lowerCAmelCase )
self.assertIsNotNone(lowerCAmelCase )
@require_torch
class A ( unittest.TestCase ):
@slow
def _A (self ):
__lowercase= XLMWithLMHeadModel.from_pretrained('xlm-mlm-en-2048' )
model.to(lowerCAmelCase )
__lowercase= torch.tensor([[1_4, 4_4_7]] , dtype=torch.long , device=lowerCAmelCase ) # the president
__lowercase= [
1_4,
4_4_7,
1_4,
4_4_7,
1_4,
4_4_7,
1_4,
4_4_7,
1_4,
4_4_7,
1_4,
4_4_7,
1_4,
4_4_7,
1_4,
4_4_7,
1_4,
4_4_7,
1_4,
4_4_7,
] # the president the president the president the president the president the president the president the president the president the president
# TODO(PVP): this and other input_ids I tried for generation give pretty bad results. Not sure why. Model might just not be made for auto-regressive inference
__lowercase= model.generate(lowerCAmelCase , do_sample=lowerCAmelCase )
self.assertListEqual(output_ids[0].cpu().numpy().tolist() , lowerCAmelCase )
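# --- Illustrative sketch (not part of the original tests) ---
# Every head-specific check above follows one recipe: build a tiny
# config-sized model, push dummy ids through it, and assert only on output
# *shapes*, never on values. The recipe reduced to a toy embedding layer:
import torch
from torch import nn

batch_size, seq_length, hidden_size, vocab_size = 2, 5, 8, 20
toy_model = nn.Embedding(vocab_size, hidden_size)
dummy_ids = torch.randint(0, vocab_size, (batch_size, seq_length))
assert toy_model(dummy_ids).shape == (batch_size, seq_length, hidden_size)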
| 304
| 0
|
import warnings
from ...utils import logging
from .image_processing_deit import DeiTImageProcessor
lowerCAmelCase = logging.get_logger(__name__)
class A ( snake_case_ ):
def __init__(self , *lowerCAmelCase , **lowerCAmelCase ):
warnings.warn(
'The class DeiTFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please'
' use DeiTImageProcessor instead.' , lowerCAmelCase , )
super().__init__(*lowerCAmelCase , **lowerCAmelCase )
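# --- Illustrative sketch (not part of the original module) ---
# The class above keeps a deprecated name importable by subclassing its
# replacement and warning on construction. The same pattern with two
# hypothetical classes:
import warnings

class NewProcessor:
    def __init__(self, size=224):
        self.size = size

class OldFeatureExtractor(NewProcessor):
    def __init__(self, *args, **kwargs):
        warnings.warn(
            "OldFeatureExtractor is deprecated; use NewProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)  # behave exactly like the new class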
| 356
|
from __future__ import annotations
import json
import requests
from bs4 import BeautifulSoup
from fake_useragent import UserAgent
lowerCAmelCase = {'''UserAgent''': UserAgent().random}
def _lowerCamelCase( lowercase__ ) -> dict:
'''simple docstring'''
__lowercase= script.contents[0]
__lowercase= json.loads(data[data.find('{"config"' ) : -1] )
return info["entry_data"]["ProfilePage"][0]["graphql"]["user"]
class A :
def __init__(self , lowerCAmelCase ):
__lowercase= f'https://www.instagram.com/{username}/'
__lowercase= self.get_json()
def _A (self ):
__lowercase= requests.get(self.url , headers=lowerCAmelCase ).text
__lowercase= BeautifulSoup(lowerCAmelCase , 'html.parser' ).find_all('script' )
try:
return extract_user_profile(scripts[4] )
except (json.decoder.JSONDecodeError, KeyError):
return extract_user_profile(scripts[3] )
def __repr__(self ):
return f'{self.__class__.__name__}(\'{self.username}\')'
def __str__(self ):
return f'{self.fullname} ({self.username}) is {self.biography}'
@property
def _A (self ):
return self.user_data["username"]
@property
def _A (self ):
return self.user_data["full_name"]
@property
def _A (self ):
return self.user_data["biography"]
@property
def _A (self ):
return self.user_data["business_email"]
@property
def _A (self ):
return self.user_data["external_url"]
@property
def _A (self ):
return self.user_data["edge_followed_by"]["count"]
@property
def _A (self ):
return self.user_data["edge_follow"]["count"]
@property
def _A (self ):
return self.user_data["edge_owner_to_timeline_media"]["count"]
@property
def _A (self ):
return self.user_data["profile_pic_url_hd"]
@property
def _A (self ):
return self.user_data["is_verified"]
@property
def _A (self ):
return self.user_data["is_private"]
def _lowerCamelCase( lowercase__ = "github" ) -> None:
'''simple docstring'''
import os
if os.environ.get('CI' ):
return # test failing on GitHub Actions
__lowercase= InstagramUser(lowercase__ )
assert instagram_user.user_data
assert isinstance(instagram_user.user_data , lowercase__ )
assert instagram_user.username == username
if username != "github":
return
assert instagram_user.fullname == "GitHub"
assert instagram_user.biography == "Built for developers."
assert instagram_user.number_of_posts > 1_5_0
assert instagram_user.number_of_followers > 1_2_0_0_0_0
assert instagram_user.number_of_followings > 1_5
assert instagram_user.email == "support@github.com"
assert instagram_user.website == "https://github.com/readme"
assert instagram_user.profile_picture_url.startswith('https://instagram.' )
assert instagram_user.is_verified is True
assert instagram_user.is_private is False
if __name__ == "__main__":
import doctest
doctest.testmod()
lowerCAmelCase = InstagramUser('''github''')
print(instagram_user)
print(F'{instagram_user.number_of_posts = }')
print(F'{instagram_user.number_of_followers = }')
print(F'{instagram_user.number_of_followings = }')
print(F'{instagram_user.email = }')
print(F'{instagram_user.website = }')
print(F'{instagram_user.profile_picture_url = }')
print(F'{instagram_user.is_verified = }')
print(F'{instagram_user.is_private = }')
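# --- Illustrative sketch (not part of the original module) ---
# extract_user_profile above slices embedded JSON out of an inline <script>
# tag. The same idea against a hard-coded HTML string, so it runs with no
# network access (the payload shape here is invented for the demo):
import json
from bs4 import BeautifulSoup

html = '<script>window._sharedData = {"config": 1, "user": "demo"};</script>'
script_tag = BeautifulSoup(html, "html.parser").find("script")
raw = script_tag.contents[0]
# rfind("}") is a slightly safer cut than the original's [:-1] slice
info = json.loads(raw[raw.find('{"config"') : raw.rfind("}") + 1])
assert info["user"] == "demo"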
| 304
| 0
|
import os
import unittest
from transformers.models.phobert.tokenization_phobert import VOCAB_FILES_NAMES, PhobertTokenizer
from ...test_tokenization_common import TokenizerTesterMixin
class A ( __lowerCAmelCase , unittest.TestCase ):
UpperCamelCase_ : Tuple =PhobertTokenizer
UpperCamelCase_ : List[Any] =False
def _A (self ):
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
__lowercase= ['''T@@''', '''i''', '''I''', '''R@@''', '''r''', '''e@@''']
__lowercase= dict(zip(lowerCamelCase__ , range(len(lowerCamelCase__ ) ) ) )
__lowercase= ['''#version: 0.2''', '''l à</w>''']
__lowercase= {'''unk_token''': '''<unk>'''}
__lowercase= os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'] )
__lowercase= os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['merges_file'] )
with open(self.vocab_file , 'w' , encoding='utf-8' ) as fp:
for token in vocab_tokens:
fp.write(f'{token} {vocab_tokens[token]}\n' )
with open(self.merges_file , 'w' , encoding='utf-8' ) as fp:
fp.write('\n'.join(lowerCamelCase__ ) )
def _A (self , **lowerCAmelCase ):
kwargs.update(self.special_tokens_map )
return PhobertTokenizer.from_pretrained(self.tmpdirname , **lowerCamelCase__ )
def _A (self , lowerCAmelCase ):
__lowercase= '''Tôi là VinAI Research'''
__lowercase= '''T<unk> i <unk> <unk> <unk> <unk> <unk> <unk> I Re<unk> e<unk> <unk> <unk> <unk>'''
return input_text, output_text
def _A (self ):
__lowercase= PhobertTokenizer(self.vocab_file , self.merges_file , **self.special_tokens_map )
__lowercase= '''Tôi là VinAI Research'''
__lowercase= '''T@@ ô@@ i l@@ à V@@ i@@ n@@ A@@ I R@@ e@@ s@@ e@@ a@@ r@@ c@@ h'''.split()
__lowercase= tokenizer.tokenize(lowerCamelCase__ )
print(lowerCamelCase__ )
self.assertListEqual(lowerCamelCase__ , lowerCamelCase__ )
__lowercase= tokens + [tokenizer.unk_token]
__lowercase= [4, 3, 5, 3, 3, 3, 3, 3, 3, 6, 7, 9, 3, 9, 3, 3, 3, 3, 3]
self.assertListEqual(tokenizer.convert_tokens_to_ids(lowerCamelCase__ ) , lowerCamelCase__ )
| 357
|
from typing import Any
import numpy as np
def _lowerCamelCase( lowercase__ ) -> bool:
'''simple docstring'''
return np.array_equal(lowercase__ , matrix.conjugate().T )
def _lowerCamelCase( lowercase__ , lowercase__ ) -> Any:
'''simple docstring'''
__lowercase= v.conjugate().T
__lowercase= v_star.dot(lowercase__ )
assert isinstance(lowercase__ , np.ndarray )
return (v_star_dot.dot(lowercase__ )) / (v_star.dot(lowercase__ ))
def _lowerCamelCase( ) -> None:
'''simple docstring'''
__lowercase= np.array([[2, 2 + 1j, 4], [2 - 1j, 3, 1j], [4, -1j, 1]] )
__lowercase= np.array([[1], [2], [3]] )
assert is_hermitian(lowercase__ ), F'{a} is not hermitian.'
print(rayleigh_quotient(lowercase__ , lowercase__ ) )
__lowercase= np.array([[1, 2, 4], [2, 3, -1], [4, -1, 1]] )
assert is_hermitian(lowercase__ ), F'{a} is not hermitian.'
assert rayleigh_quotient(lowercase__ , lowercase__ ) == float(3 )
if __name__ == "__main__":
import doctest
doctest.testmod()
tests()
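# --- Illustrative sketch (not part of the original module) ---
# For a Hermitian matrix A, the Rayleigh quotient R(A, v) = v*Av / v*v is
# always real and bounded by the extreme eigenvalues of A. A quick
# numerical check of that property on the second test matrix above:
a = np.array([[1, 2, 4], [2, 3, -1], [4, -1, 1]], dtype=float)
v = np.ones((3, 1))
quotient = ((v.T @ a @ v) / (v.T @ v)).item()
low, high = np.linalg.eigvalsh(a)[[0, -1]]  # eigvalsh returns ascending eigenvalues
assert low <= quotient <= high
print(quotient, (low, high))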
| 304
| 0
|
from collections.abc import Callable
import numpy as np
def _lowerCamelCase( lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ ) -> List[Any]:
'''simple docstring'''
__lowercase= int(np.ceil((x_end - xa) / step_size ) )
__lowercase= np.zeros((n + 1,) )
__lowercase= ya
__lowercase= xa
for k in range(A__ ):
__lowercase= y[k] + step_size * ode_func(A__ , y[k] )
x += step_size
return y
if __name__ == "__main__":
import doctest
doctest.testmod()
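# --- Illustrative sketch (not part of the original module) ---
# The function above is the explicit (forward) Euler method
# y_{k+1} = y_k + h * f(x_k, y_k). Solving y' = y with y(0) = 1 and
# comparing to the exact value e shows the expected first-order behaviour:
# halving the step size roughly halves the error.
import math

def euler_final(ode_func, y0, x0, x_end, h):
    n = round((x_end - x0) / h)  # number of fixed-size steps
    x, y = x0, y0
    for _ in range(n):
        y += h * ode_func(x, y)
        x += h
    return y

for h in (0.1, 0.05, 0.025):
    print(f"h={h}: error={abs(euler_final(lambda x, y: y, 1.0, 0.0, 1.0, h) - math.e):.5f}")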
| 358
|
from math import ceil
from typing import List, Optional, Union
import numpy as np
from ...audio_utils import mel_filter_bank, spectrogram, window_function
from ...feature_extraction_sequence_utils import BatchFeature, SequenceFeatureExtractor
from ...utils import TensorType, logging
lowerCAmelCase = logging.get_logger(__name__)
class A ( A_ ):
UpperCamelCase_ : Dict =['''audio_values''', '''audio_mask''']
def __init__(self , lowerCAmelCase=2_0_4_8 , lowerCAmelCase=1 , lowerCAmelCase=[1_6, 1_6] , lowerCAmelCase=1_2_8 , lowerCAmelCase=4_4_1_0_0 , lowerCAmelCase=8_6 , lowerCAmelCase=2_0_4_8 , lowerCAmelCase=0.0 , **lowerCAmelCase , ):
super().__init__(
feature_size=lowerCAmelCase , sampling_rate=lowerCAmelCase , padding_value=lowerCAmelCase , **lowerCAmelCase , )
__lowercase= spectrogram_length
__lowercase= num_channels
__lowercase= patch_size
__lowercase= feature_size // self.patch_size[1]
__lowercase= n_fft
__lowercase= sampling_rate // hop_length_to_sampling_rate
__lowercase= sampling_rate
__lowercase= padding_value
__lowercase= mel_filter_bank(
num_frequency_bins=1 + n_fft // 2 , num_mel_filters=lowerCAmelCase , min_frequency=0.0 , max_frequency=2_20_50.0 , sampling_rate=lowerCAmelCase , norm='slaney' , mel_scale='slaney' , ).T
def _A (self , lowerCAmelCase ):
__lowercase= spectrogram(
lowerCAmelCase , window_function(self.n_fft , 'hann' ) , frame_length=self.n_fft , hop_length=self.hop_length , power=2.0 , mel_filters=self.mel_filters.T , log_mel='dB' , db_range=80.0 , )
__lowercase= log_spec[:, :-1]
__lowercase= log_spec - 20.0
__lowercase= np.clip(log_spec / 40.0 , -2.0 , 0.0 ) + 1.0
return log_spec
def __call__(self , lowerCAmelCase , lowerCAmelCase = None , lowerCAmelCase = True , lowerCAmelCase = None , lowerCAmelCase = False , lowerCAmelCase = False , **lowerCAmelCase , ):
if sampling_rate is not None:
if sampling_rate != self.sampling_rate:
raise ValueError(
'This feature extractor is set to support sampling rate'
f' of {self.sampling_rate}. Please make sure that the provided `raw_speech` input was sampled'
f' with {self.sampling_rate} and not {sampling_rate}.' )
else:
logger.warning(
'It is strongly recommended to pass the `sampling_rate` argument to this function. '
'Failing to do so can result in silent errors that might be hard to debug.' )
__lowercase= isinstance(lowerCAmelCase , np.ndarray ) and len(raw_speech.shape ) > 1
if is_batched_numpy and len(raw_speech.shape ) > 2:
raise ValueError(f'Only mono-channel audio is supported for input to {self}' )
__lowercase= is_batched_numpy or (
isinstance(lowerCAmelCase , (list, tuple) ) and (isinstance(raw_speech[0] , (np.ndarray, tuple, list) ))
)
if is_batched:
__lowercase= [np.asarray([speech] , dtype=np.floataa ).T for speech in raw_speech]
elif not is_batched and not isinstance(lowerCAmelCase , np.ndarray ):
__lowercase= np.asarray(lowerCAmelCase , dtype=np.floataa )
elif isinstance(lowerCAmelCase , np.ndarray ) and raw_speech.dtype is np.dtype(np.floataa ):
__lowercase= raw_speech.astype(np.floataa )
# always return batch
if not is_batched:
__lowercase= [np.asarray([raw_speech] ).T]
# Convert audio signals to log mel spectrograms, truncate by time axis
__lowercase= [
self._np_extract_fbank_features(waveform.squeeze() ).T[: self.spectrogram_length] for waveform in raw_speech
]
if isinstance(audio_features[0] , lowerCAmelCase ):
__lowercase= [np.asarray(lowerCAmelCase , dtype=np.floataa ) for feature in audio_features]
# Create audio attention mask
__lowercase= max(
[ceil(feature.shape[0] / self.patch_size[0] ) * self.freq_len for feature in audio_features] ) # The maximum number of audio patches in a batch
if return_attention_mask:
__lowercase= [
(ceil(feature.shape[0] / self.patch_size[0] ) * self.freq_len) * [1]
+ (max_patch_len - ceil(feature.shape[0] / self.patch_size[0] ) * self.freq_len) * [0]
for feature in audio_features
]
__lowercase= np.array(lowerCAmelCase ).astype(np.floataa )
# convert into correct format for padding
__lowercase= max_patch_len // self.freq_len * self.patch_size[0] # The maximum audio size in a batch
__lowercase= np.ones([len(lowerCAmelCase ), 1, max_time_len, self.feature_size] ).astype(np.floataa )
__lowercase= padded_audio_features * self.padding_value
for i in range(len(lowerCAmelCase ) ):
__lowercase= audio_features[i]
__lowercase= feature
# return as BatchFeature
if return_attention_mask:
__lowercase= {'audio_values': padded_audio_features, 'audio_mask': audio_mask}
else:
__lowercase= {'audio_values': padded_audio_features}
__lowercase= BatchFeature(data=lowerCAmelCase , tensor_type=lowerCAmelCase )
return encoded_inputs
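# --- Illustrative sketch (not part of the original module) ---
# The attention mask built above marks 1 for real audio patches and 0 for
# padding, where a spectrogram with L time frames contributes
# ceil(L / patch_time) * freq_len patches. The same bookkeeping in plain
# numpy:
def patch_mask(frame_lengths, patch_time=16, freq_len=8):
    patch_counts = [ceil(n / patch_time) * freq_len for n in frame_lengths]
    mask = np.zeros((len(frame_lengths), max(patch_counts)), dtype=np.float32)
    for row, count in enumerate(patch_counts):
        mask[row, :count] = 1.0  # real patches first, zero padding after
    return mask

print(patch_mask([100, 37]).sum(axis=1))  # [56. 24.] real patches per example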
| 304
| 0
|
from sklearn.metrics import mean_squared_error
import datasets
lowerCAmelCase = '\\n@article{scikit-learn,\n title={Scikit-learn: Machine Learning in {P}ython},\n author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V.\n and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P.\n and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and\n Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.},\n journal={Journal of Machine Learning Research},\n volume={12},\n pages={2825--2830},\n year={2011}\n}\n'
lowerCAmelCase = '\\nMean Squared Error(MSE) is the average of the square of difference between the predicted\nand actual values.\n'
lowerCAmelCase = '\nArgs:\n predictions: array-like of shape (n_samples,) or (n_samples, n_outputs)\n Estimated target values.\n references: array-like of shape (n_samples,) or (n_samples, n_outputs)\n Ground truth (correct) target values.\n sample_weight: array-like of shape (n_samples,), default=None\n Sample weights.\n multioutput: {"raw_values", "uniform_average"} or array-like of shape (n_outputs,), default="uniform_average"\n Defines aggregating of multiple output values. Array-like value defines weights used to average errors.\n\n "raw_values" : Returns a full set of errors in case of multioutput input.\n\n "uniform_average" : Errors of all outputs are averaged with uniform weight.\n\n squared : bool, default=True\n If True returns MSE value, if False returns RMSE (Root Mean Squared Error) value.\n\nReturns:\n mse : mean squared error.\nExamples:\n\n >>> mse_metric = datasets.load_metric("mse")\n >>> predictions = [2.5, 0.0, 2, 8]\n >>> references = [3, -0.5, 2, 7]\n >>> results = mse_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {\'mse\': 0.375}\n >>> rmse_result = mse_metric.compute(predictions=predictions, references=references, squared=False)\n >>> print(rmse_result)\n {\'mse\': 0.6123724356957945}\n\n If you\'re using multi-dimensional lists, then set the config as follows :\n\n >>> mse_metric = datasets.load_metric("mse", "multilist")\n >>> predictions = [[0.5, 1], [-1, 1], [7, -6]]\n >>> references = [[0, 2], [-1, 2], [8, -5]]\n >>> results = mse_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {\'mse\': 0.7083333333333334}\n >>> results = mse_metric.compute(predictions=predictions, references=references, multioutput=\'raw_values\')\n >>> print(results) # doctest: +NORMALIZE_WHITESPACE\n {\'mse\': array([0.41666667, 1. ])}\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class A ( datasets.Metric ):
def _A (self ):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(self._get_feature_types() ) , reference_urls=[
'https://scikit-learn.org/stable/modules/generated/sklearn.metrics.mean_squared_error.html'
] , )
def _A (self ):
if self.config_name == "multilist":
return {
"predictions": datasets.Sequence(datasets.Value('float' ) ),
"references": datasets.Sequence(datasets.Value('float' ) ),
}
else:
return {
"predictions": datasets.Value('float' ),
"references": datasets.Value('float' ),
}
def _A (self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase=None , lowerCAmelCase="uniform_average" , lowerCAmelCase=True ):
__lowercase= mean_squared_error(
lowerCAmelCase , lowerCAmelCase , sample_weight=lowerCAmelCase , multioutput=lowerCAmelCase , squared=lowerCAmelCase )
return {"mse": mse}
| 359
|
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from accelerate import PartialState
from accelerate.utils.operations import broadcast, gather, gather_object, pad_across_processes, reduce
def _lowerCamelCase( lowercase__ ) -> List[str]:
'''simple docstring'''
return (torch.arange(state.num_processes ) + 1.0 + (state.num_processes * state.process_index)).to(state.device )
def _lowerCamelCase( lowercase__ ) -> int:
'''simple docstring'''
__lowercase= create_tensor(lowercase__ )
__lowercase= gather(lowercase__ )
assert gathered_tensor.tolist() == list(range(1 , state.num_processes**2 + 1 ) )
def _lowerCamelCase( lowercase__ ) -> int:
'''simple docstring'''
__lowercase= [state.process_index]
__lowercase= gather_object(lowercase__ )
assert len(lowercase__ ) == state.num_processes, F'{gathered_obj}, {len(lowercase__ )} != {state.num_processes}'
assert gathered_obj == list(range(state.num_processes ) ), F'{gathered_obj} != {list(range(state.num_processes ) )}'
def _lowerCamelCase( lowercase__ ) -> List[str]:
'''simple docstring'''
__lowercase= create_tensor(lowercase__ )
__lowercase= broadcast(lowercase__ )
assert broadcasted_tensor.shape == torch.Size([state.num_processes] )
assert broadcasted_tensor.tolist() == list(range(1 , state.num_processes + 1 ) )
def _lowerCamelCase( lowercase__ ) -> List[Any]:
'''simple docstring'''
if state.is_main_process:
__lowercase= torch.arange(state.num_processes + 1 ).to(state.device )
else:
__lowercase= torch.arange(state.num_processes ).to(state.device )
__lowercase= pad_across_processes(lowercase__ )
assert padded_tensor.shape == torch.Size([state.num_processes + 1] )
if not state.is_main_process:
assert padded_tensor.tolist() == list(range(0 , state.num_processes ) ) + [0]
def _lowerCamelCase( lowercase__ ) -> Any:
'''simple docstring'''
if state.num_processes != 2:
return
__lowercase= create_tensor(lowercase__ )
__lowercase= reduce(lowercase__ , 'sum' )
__lowercase= torch.tensor([4.0, 6] ).to(state.device )
assert torch.allclose(lowercase__ , lowercase__ ), F'{reduced_tensor} != {truth_tensor}'
def _lowerCamelCase( lowercase__ ) -> Union[str, Any]:
'''simple docstring'''
if state.num_processes != 2:
return
__lowercase= create_tensor(lowercase__ )
__lowercase= reduce(lowercase__ , 'mean' )
__lowercase= torch.tensor([2.0, 3] ).to(state.device )
assert torch.allclose(lowercase__ , lowercase__ ), F'{reduced_tensor} != {truth_tensor}'
def _lowerCamelCase( lowercase__ ) -> List[str]:
'''simple docstring'''
main()
def _lowerCamelCase( ) -> List[str]:
'''simple docstring'''
__lowercase= PartialState()
state.print(F'State: {state}' )
state.print('testing gather' )
test_gather(lowercase__ )
state.print('testing gather_object' )
test_gather_object(lowercase__ )
state.print('testing broadcast' )
test_broadcast(lowercase__ )
state.print('testing pad_across_processes' )
test_pad_across_processes(lowercase__ )
state.print('testing reduce_sum' )
test_reduce_sum(lowercase__ )
state.print('testing reduce_mean' )
test_reduce_mean(lowercase__ )
if __name__ == "__main__":
main()
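# --- Illustrative sketch (not part of the original tests) ---
# create_tensor above gives rank r the values [r*n + 1, ..., r*n + n] for
# n processes, so gathering across all ranks yields exactly 1..n**2, which
# is the invariant test_gather asserts. The same bookkeeping simulated
# without any distributed setup:
def create_rank_values(rank, num_processes):
    return [rank * num_processes + i + 1 for i in range(num_processes)]

n = 3
gathered = [v for r in range(n) for v in create_rank_values(r, n)]
assert gathered == list(range(1, n**2 + 1))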
| 304
| 0
|
import jax.numpy as jnp
from ...utils import logging
from ..ta.modeling_flax_ta import FlaxTaEncoderModel, FlaxTaForConditionalGeneration, FlaxTaModel
from .configuration_mta import MTaConfig
lowerCAmelCase = logging.get_logger(__name__)
lowerCAmelCase = '''T5Config'''
def _lowerCamelCase( lowercase__ , lowercase__ , lowercase__ ) -> jnp.ndarray:
'''simple docstring'''
__lowercase= jnp.zeros_like(a__ )
__lowercase= shifted_input_ids.at[:, 1:].set(input_ids[:, :-1] )
__lowercase= shifted_input_ids.at[:, 0].set(a__ )
__lowercase= jnp.where(shifted_input_ids == -1_0_0 , a__ , a__ )
return shifted_input_ids
class A ( _lowercase ):
UpperCamelCase_ : int ='''mt5'''
UpperCamelCase_ : Optional[Any] =MTaConfig
class A ( _lowercase ):
UpperCamelCase_ : List[str] ='''mt5'''
UpperCamelCase_ : Any =MTaConfig
class A ( _lowercase ):
UpperCamelCase_ : str ='''mt5'''
UpperCamelCase_ : Tuple =MTaConfig
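# --- Illustrative sketch (not part of the original module) ---
# shift_tokens_right above prepends the decoder start token, drops the last
# position, and maps ignored label positions (-100) to the pad id. The same
# transformation in plain numpy:
import numpy as np

def shift_right_np(input_ids, pad_token_id, decoder_start_token_id):
    shifted = np.zeros_like(input_ids)
    shifted[:, 1:] = input_ids[:, :-1]
    shifted[:, 0] = decoder_start_token_id
    return np.where(shifted == -100, pad_token_id, shifted)

print(shift_right_np(np.array([[5, -100, -100]]), pad_token_id=0, decoder_start_token_id=1))
# [[1 5 0]] : start token prepended, one -100 mapped to pad, the last -100 dropped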
| 360
|
# DISCLAIMER: This file is strongly influenced by https://github.com/yang-song/score_sde_pytorch
import math
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, randn_tensor
from .scheduling_utils import SchedulerMixin, SchedulerOutput
@dataclass
class A ( A_ ):
UpperCamelCase_ : torch.FloatTensor
UpperCamelCase_ : torch.FloatTensor
class A ( A_ , A_ ):
UpperCamelCase_ : Dict =1
@register_to_config
def __init__(self , lowerCAmelCase = 2_0_0_0 , lowerCAmelCase = 0.15 , lowerCAmelCase = 0.01 , lowerCAmelCase = 13_48.0 , lowerCAmelCase = 1E-5 , lowerCAmelCase = 1 , ):
# standard deviation of the initial noise distribution
__lowercase= sigma_max
# setable values
__lowercase= None
self.set_sigmas(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase )
def _A (self , lowerCAmelCase , lowerCAmelCase = None ):
return sample
def _A (self , lowerCAmelCase , lowerCAmelCase = None , lowerCAmelCase = None ):
__lowercase= sampling_eps if sampling_eps is not None else self.config.sampling_eps
__lowercase= torch.linspace(1 , lowerCAmelCase , lowerCAmelCase , device=lowerCAmelCase )
def _A (self , lowerCAmelCase , lowerCAmelCase = None , lowerCAmelCase = None , lowerCAmelCase = None ):
__lowercase= sigma_min if sigma_min is not None else self.config.sigma_min
__lowercase= sigma_max if sigma_max is not None else self.config.sigma_max
__lowercase= sampling_eps if sampling_eps is not None else self.config.sampling_eps
if self.timesteps is None:
self.set_timesteps(lowerCAmelCase , lowerCAmelCase )
__lowercase= sigma_min * (sigma_max / sigma_min) ** (self.timesteps / sampling_eps)
__lowercase= torch.exp(torch.linspace(math.log(lowerCAmelCase ) , math.log(lowerCAmelCase ) , lowerCAmelCase ) )
__lowercase= torch.tensor([sigma_min * (sigma_max / sigma_min) ** t for t in self.timesteps] )
def _A (self , lowerCAmelCase , lowerCAmelCase ):
return torch.where(
timesteps == 0 , torch.zeros_like(t.to(timesteps.device ) ) , self.discrete_sigmas[timesteps - 1].to(timesteps.device ) , )
def _A (self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase = None , lowerCAmelCase = True , ):
if self.timesteps is None:
raise ValueError(
'`self.timesteps` is not set, you need to run \'set_timesteps\' after creating the scheduler' )
__lowercase= timestep * torch.ones(
sample.shape[0] , device=sample.device ) # torch.repeat_interleave(timestep, sample.shape[0])
__lowercase= (timestep * (len(self.timesteps ) - 1)).long()
# mps requires indices to be in the same device, so we use cpu as is the default with cuda
__lowercase= timesteps.to(self.discrete_sigmas.device )
__lowercase= self.discrete_sigmas[timesteps].to(sample.device )
__lowercase= self.get_adjacent_sigma(lowerCAmelCase , lowerCAmelCase ).to(sample.device )
__lowercase= torch.zeros_like(lowerCAmelCase )
__lowercase= (sigma**2 - adjacent_sigma**2) ** 0.5
# equation 6 in the paper: the model_output modeled by the network is grad_x log pt(x)
# also equation 47 shows the analog from SDE models to ancestral sampling methods
__lowercase= diffusion.flatten()
while len(diffusion.shape ) < len(sample.shape ):
__lowercase= diffusion.unsqueeze(-1 )
__lowercase= drift - diffusion**2 * model_output
# equation 6: sample noise for the diffusion term of
__lowercase= randn_tensor(
sample.shape , layout=sample.layout , generator=lowerCAmelCase , device=sample.device , dtype=sample.dtype )
__lowercase= sample - drift # subtract because `dt` is a small negative timestep
# TODO is the variable diffusion the correct scaling term for the noise?
__lowercase= prev_sample_mean + diffusion * noise # add impact of diffusion field g
if not return_dict:
return (prev_sample, prev_sample_mean)
return SdeVeOutput(prev_sample=lowerCAmelCase , prev_sample_mean=lowerCAmelCase )
def _A (self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase = None , lowerCAmelCase = True , ):
if self.timesteps is None:
raise ValueError(
'`self.timesteps` is not set, you need to run \'set_timesteps\' after creating the scheduler' )
# For small batch sizes, the paper "suggest replacing norm(z) with sqrt(d), where d is the dim. of z"
# sample noise for correction
__lowercase= randn_tensor(sample.shape , layout=sample.layout , generator=lowerCAmelCase ).to(sample.device )
# compute step size from the model_output, the noise, and the snr
__lowercase= torch.norm(model_output.reshape(model_output.shape[0] , -1 ) , dim=-1 ).mean()
__lowercase= torch.norm(noise.reshape(noise.shape[0] , -1 ) , dim=-1 ).mean()
__lowercase= (self.config.snr * noise_norm / grad_norm) ** 2 * 2
__lowercase= step_size * torch.ones(sample.shape[0] ).to(sample.device )
# self.repeat_scalar(step_size, sample.shape[0])
# compute corrected sample: model_output term and noise term
__lowercase= step_size.flatten()
while len(step_size.shape ) < len(sample.shape ):
__lowercase= step_size.unsqueeze(-1 )
__lowercase= sample + step_size * model_output
__lowercase= prev_sample_mean + ((step_size * 2) ** 0.5) * noise
if not return_dict:
return (prev_sample,)
return SchedulerOutput(prev_sample=lowerCAmelCase )
def _A (self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , ):
# Make sure sigmas and timesteps have the same device and dtype as original_samples
__lowercase= timesteps.to(original_samples.device )
__lowercase= self.discrete_sigmas.to(original_samples.device )[timesteps]
__lowercase= (
noise * sigmas[:, None, None, None]
if noise is not None
else torch.randn_like(lowerCAmelCase ) * sigmas[:, None, None, None]
)
__lowercase= noise + original_samples
return noisy_samples
def __len__(self ):
return self.config.num_train_timesteps
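# --- Illustrative sketch (not part of the original module) ---
# set_sigmas above builds a geometric noise schedule: with t running from 1
# down to sampling_eps, sigma(t) = sigma_min * (sigma_max / sigma_min)**t
# decays exponentially from sigma_max toward sigma_min. The schedule in a
# few lines of plain torch, using the module's default config values:
sigma_min, sigma_max, eps, steps = 0.01, 1348.0, 1e-5, 5
t = torch.linspace(1, eps, steps)
sigmas = sigma_min * (sigma_max / sigma_min) ** t  # geometric interpolation
print(sigmas)  # roughly tensor([1.3480e+03, ..., 1.0001e-02])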
| 304
| 0
|
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCAmelCase = logging.get_logger(__name__)
lowerCAmelCase = {
'''BridgeTower/bridgetower-base''': '''https://huggingface.co/BridgeTower/bridgetower-base/blob/main/config.json''',
'''BridgeTower/bridgetower-base-itm-mlm''': (
'''https://huggingface.co/BridgeTower/bridgetower-base-itm-mlm/blob/main/config.json'''
),
}
class A ( __lowercase ):
UpperCamelCase_ : Tuple ='''bridgetower_vision_model'''
def __init__(self , lowerCAmelCase=7_6_8 , lowerCAmelCase=1_2 , lowerCAmelCase=3 , lowerCAmelCase=1_6 , lowerCAmelCase=2_8_8 , lowerCAmelCase=1 , lowerCAmelCase=1E-05 , lowerCAmelCase=False , lowerCAmelCase=True , lowerCAmelCase=False , **lowerCAmelCase , ):
super().__init__(**UpperCAmelCase__ )
__lowercase= hidden_size
__lowercase= num_hidden_layers
__lowercase= num_channels
__lowercase= patch_size
__lowercase= image_size
__lowercase= initializer_factor
__lowercase= layer_norm_eps
__lowercase= stop_gradient
__lowercase= share_layernorm
__lowercase= remove_last_layer
@classmethod
def _A (cls , lowerCAmelCase , **lowerCAmelCase ):
__lowercase, __lowercase= cls.get_config_dict(UpperCAmelCase__ , **UpperCAmelCase__ )
if config_dict.get('model_type' ) == "bridgetower":
__lowercase= config_dict['text_config']
if "model_type" in config_dict and hasattr(cls , 'model_type' ) and config_dict["model_type"] != cls.model_type:
logger.warning(
f'You are using a model of type {config_dict["model_type"]} to instantiate a model of type '
f'{cls.model_type}. This is not supported for all configurations of models and can yield errors.' )
return cls.from_dict(UpperCAmelCase__ , **UpperCAmelCase__ )
class A ( __lowercase ):
UpperCamelCase_ : str ='''bridgetower_text_model'''
def __init__(self , lowerCAmelCase=5_0_2_6_5 , lowerCAmelCase=7_6_8 , lowerCAmelCase=1_2 , lowerCAmelCase=1_2 , lowerCAmelCase=1 , lowerCAmelCase=3_0_7_2 , lowerCAmelCase="gelu" , lowerCAmelCase=0.1 , lowerCAmelCase=0.1 , lowerCAmelCase=5_1_4 , lowerCAmelCase=1 , lowerCAmelCase=1E-05 , lowerCAmelCase=1 , lowerCAmelCase=0 , lowerCAmelCase=2 , lowerCAmelCase="absolute" , lowerCAmelCase=True , **lowerCAmelCase , ):
super().__init__(**UpperCAmelCase__ )
__lowercase= vocab_size
__lowercase= hidden_size
__lowercase= num_hidden_layers
__lowercase= num_attention_heads
__lowercase= hidden_act
__lowercase= initializer_factor
__lowercase= intermediate_size
__lowercase= hidden_dropout_prob
__lowercase= attention_probs_dropout_prob
__lowercase= max_position_embeddings
__lowercase= type_vocab_size
__lowercase= layer_norm_eps
__lowercase= position_embedding_type
__lowercase= use_cache
__lowercase= pad_token_id
__lowercase= bos_token_id
__lowercase= eos_token_id
@classmethod
def _A (cls , lowerCAmelCase , **lowerCAmelCase ):
__lowercase, __lowercase= cls.get_config_dict(UpperCAmelCase__ , **UpperCAmelCase__ )
if config_dict.get('model_type' ) == "bridgetower":
__lowercase= config_dict['text_config']
if "model_type" in config_dict and hasattr(cls , 'model_type' ) and config_dict["model_type"] != cls.model_type:
logger.warning(
f'You are using a model of type {config_dict["model_type"]} to instantiate a model of type '
f'{cls.model_type}. This is not supported for all configurations of models and can yield errors.' )
return cls.from_dict(UpperCAmelCase__ , **UpperCAmelCase__ )
class A ( __lowercase ):
UpperCamelCase_ : Optional[Any] ='''bridgetower'''
def __init__(self , lowerCAmelCase=True , lowerCAmelCase="gelu" , lowerCAmelCase=7_6_8 , lowerCAmelCase=1 , lowerCAmelCase=1E-05 , lowerCAmelCase=False , lowerCAmelCase="add" , lowerCAmelCase=1_2 , lowerCAmelCase=6 , lowerCAmelCase=False , lowerCAmelCase=False , lowerCAmelCase=None , lowerCAmelCase=None , **lowerCAmelCase , ):
# TODO: remove this once the Hub files are updated.
__lowercase= kwargs.pop('text_config_dict' , UpperCAmelCase__ )
__lowercase= kwargs.pop('vision_config_dict' , UpperCAmelCase__ )
super().__init__(**UpperCAmelCase__ )
__lowercase= share_cross_modal_transformer_layers
__lowercase= hidden_act
__lowercase= hidden_size
__lowercase= initializer_factor
__lowercase= layer_norm_eps
__lowercase= share_link_tower_layers
__lowercase= link_tower_type
__lowercase= num_attention_heads
__lowercase= num_hidden_layers
__lowercase= tie_word_embeddings
__lowercase= init_layernorm_from_vision_encoder
if text_config is None:
__lowercase= {}
logger.info('`text_config` is `None`. Initializing the `BridgeTowerTextConfig` with default values.' )
if vision_config is None:
__lowercase= {}
logger.info('`vision_config` is `None`. Initializing the `BridgeTowerVisionConfig` with default values.' )
__lowercase= BridgeTowerTextConfig(**UpperCAmelCase__ )
__lowercase= BridgeTowerVisionConfig(**UpperCAmelCase__ )
@classmethod
def _A (cls , lowerCAmelCase , lowerCAmelCase , **lowerCAmelCase ):
return cls(text_config=text_config.to_dict() , vision_config=vision_config.to_dict() , **UpperCAmelCase__ )
def _A (self ):
__lowercase= copy.deepcopy(self.__dict__ )
__lowercase= self.text_config.to_dict()
__lowercase= self.vision_config.to_dict()
__lowercase= self.__class__.model_type
return output
| 361
|
import gc
import tempfile
import unittest
import numpy as np
import torch
from diffusers import VersatileDiffusionPipeline
from diffusers.utils.testing_utils import load_image, nightly, require_torch_gpu, torch_device
# The original assignment target was mangled by preprocessing; in upstream
# diffusers test files this module-level line is typically the tf32 guard:
torch.backends.cuda.matmul.allow_tf32 = False


class VersatileDiffusionPipelineFastTests(unittest.TestCase):
    pass


@nightly
@require_torch_gpu
class VersatileDiffusionPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_from_save_pretrained(self):
        pipe = VersatileDiffusionPipeline.from_pretrained("shi-labs/versatile-diffusion", torch_dtype=torch.float16)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        prompt_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg"
        )

        generator = torch.manual_seed(0)
        image = pipe.dual_guided(
            prompt="first prompt",
            image=prompt_image,
            text_to_image_strength=0.75,
            generator=generator,
            guidance_scale=7.5,
            num_inference_steps=2,
            output_type="numpy",
        ).images

        with tempfile.TemporaryDirectory() as tmpdirname:
            pipe.save_pretrained(tmpdirname)
            pipe = VersatileDiffusionPipeline.from_pretrained(tmpdirname, torch_dtype=torch.float16)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        generator = generator.manual_seed(0)
        new_image = pipe.dual_guided(
            prompt="first prompt",
            image=prompt_image,
            text_to_image_strength=0.75,
            generator=generator,
            guidance_scale=7.5,
            num_inference_steps=2,
            output_type="numpy",
        ).images

        assert np.abs(image - new_image).sum() < 1e-5, "Models don't have the same forward pass"

    def test_inference_dual_guided_then_text_to_image(self):
        pipe = VersatileDiffusionPipeline.from_pretrained("shi-labs/versatile-diffusion", torch_dtype=torch.float16)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        prompt = "cyberpunk 2077"
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg"
        )
        generator = torch.manual_seed(0)
        image = pipe.dual_guided(
            prompt=prompt,
            image=init_image,
            text_to_image_strength=0.75,
            generator=generator,
            guidance_scale=7.5,
            num_inference_steps=50,
            output_type="numpy",
        ).images

        image_slice = image[0, 253:256, 253:256, -1]
        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.1448, 0.1619, 0.1741, 0.1086, 0.1147, 0.1128, 0.1199, 0.1165, 0.1001])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1

        prompt = "A painting of a squirrel eating a burger "
        generator = torch.manual_seed(0)
        image = pipe.text_to_image(
            prompt=prompt, generator=generator, guidance_scale=7.5, num_inference_steps=50, output_type="numpy"
        ).images

        image_slice = image[0, 253:256, 253:256, -1]
        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.3367, 0.3169, 0.2656, 0.3870, 0.4790, 0.3796, 0.4009, 0.4878, 0.4778])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1

        image = pipe.image_variation(init_image, generator=generator, output_type="numpy").images
        image_slice = image[0, 253:256, 253:256, -1]
        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.3076, 0.3123, 0.3284, 0.3782, 0.3770, 0.3894, 0.4297, 0.4331, 0.4456])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1
import json
import os
from datetime import date
from pathlib import Path
from tabulate import DataRow, TableFormat, tabulate
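
# Each `*.log` file consumed below is expected to hold one JSON object per
# line, as produced by pytest's report-log output. An illustrative line
# (ours; fields besides `nodeid`, `duration`, and `outcome` may vary):
#
#   {"nodeid": "tests/test_metrics.py::test_accuracy", "duration": 0.0314, "outcome": "passed"}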
hf_table_format = TableFormat(
    lineabove=None,
    linebelowheader=None,
    linebetweenrows=None,
    linebelow=None,
    headerrow=DataRow("", "|", "|"),
    datarow=DataRow("", "|", "|"),
    padding=1,
    with_header_hide=None,
)

failed = []
group_info = []
no_error_payload = {"type": "section", "text": {"type": "plain_text", "text": "No failed tests! 🤗", "emoji": True}}
payload = [
    {
        "type": "header",
        "text": {
            "type": "plain_text",
            "text": f"🤗 Accelerate nightly {os.environ.get('TEST_TYPE', '')} test results",
            "emoji": True,
        },
    }
]

total_num_failed = 0
for log in Path().glob("*.log"):
    section_num_failed = 0
    with open(log, "r") as f:
        for line in f:
            line = json.loads(line)
            if line.get("nodeid", "") != "":
                test = line["nodeid"]
                if line.get("duration", None) is not None:
                    duration = f"{line['duration']:.4f}"
                    if line.get("outcome", "") == "failed":
                        section_num_failed += 1
                        failed.append([test, duration, log.name.split("_")[0]])
                        total_num_failed += 1
    group_info.append([str(log), section_num_failed, failed])
    failed = []
    log.unlink()

message = ""
all_files2failed = []
if total_num_failed > 0:
    for name, num_failed, failed_tests in group_info:
        if num_failed > 0:
            if num_failed == 1:
                message += f"*{name[1:]}: {num_failed} failed test*\n"
            else:
                message += f"*{name[1:]}: {num_failed} failed tests*\n"
            failed_table = []
            files2failed = {}
            for test in failed_tests:
                data = test[0].split("::")
                data[0] = data[0].split("/")[-1]
                if data[0] not in files2failed:
                    files2failed[data[0]] = [data[1:]]
                else:
                    files2failed[data[0]] += [data[1:]]
                failed_table.append(data)

            files = [test[0] for test in failed_table]
            individual_files = list(set(files))
            # Count number of instances in failed_tests
            table = []
            for file in individual_files:
                table.append([file, len(files2failed[file])])

            failed_table = tabulate(
                table,
                headers=["Test Location", "Num Failed"],
                tablefmt=hf_table_format,
                stralign="right",
            )
            message += f"\n```\n{failed_table}\n```"
            all_files2failed.append(files2failed)
    if len(message) > 3000:
        err = "Too many failed tests, please see the full report in the Action results."
        offset = len(err) + 10
        message = message[: 3000 - offset] + f"\n...\n```\n{err}"
    print(f"### {message}")
else:
    message = "No failed tests! 🤗"
    print(f"## {message}")
    payload.append(no_error_payload)

if os.environ.get("TEST_TYPE", "") != "":
    from slack_sdk import WebClient

    client = WebClient(token=os.environ["SLACK_API_TOKEN"])
    if message != "No failed tests! 🤗":
        md_report = {
            "type": "section",
            "text": {
                "type": "mrkdwn",
                "text": message,
            },
        }
        payload.append(md_report)
        action_button = {
            "type": "section",
            "text": {
                "type": "mrkdwn",
                "text": "*For more details:*",
            },
            "accessory": {
                "type": "button",
                "text": {
                    "type": "plain_text",
                    "text": "Check Action results",
                    "emoji": True,
                },
                "url": f"https://github.com/{os.environ['GITHUB_REPOSITORY']}/actions/runs/{os.environ['GITHUB_RUN_ID']}",
            },
        }
        payload.append(action_button)
        date_report = {
            "type": "context",
            "elements": [
                {
                    "type": "plain_text",
                    "text": f"Nightly {os.environ.get('TEST_TYPE')} test results for {date.today()}",
                }
            ],
        }
        payload.append(date_report)

    response = client.chat_postMessage(channel="#accelerate-ci-daily", text=message, blocks=payload)
    ts = response.data["ts"]
    for failed_file in all_files2failed:
        for test_location, test_failures in failed_file.items():
            # Keep only the first instance of the test name
            test_class = ""
            for i, row in enumerate(test_failures):
                if row[0] != test_class:
                    test_class = row[0]
                else:
                    row[0] = ""  # blank out repeated class names in the thread table
            payload = {
                "type": "section",
                "text": {
                    "type": "mrkdwn",
                    "text": f"Test location: {test_location}\n```\n{tabulate(test_failures, headers=['Class', 'Test'], tablefmt=hf_table_format, stralign='right')}\n```",
                },
            }
            client.chat_postMessage(
                channel="#accelerate-ci-daily",
                thread_ts=ts,
                blocks=[payload],
            )
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
    "configuration_xmod": [
        "XMOD_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "XmodConfig",
        "XmodOnnxConfig",
    ],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_xmod"] = [
        "XMOD_PRETRAINED_MODEL_ARCHIVE_LIST",
        "XmodForCausalLM",
        "XmodForMaskedLM",
        "XmodForMultipleChoice",
        "XmodForQuestionAnswering",
        "XmodForSequenceClassification",
        "XmodForTokenClassification",
        "XmodModel",
        "XmodPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_xmod import XMOD_PRETRAINED_CONFIG_ARCHIVE_MAP, XmodConfig, XmodOnnxConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_xmod import (
            XMOD_PRETRAINED_MODEL_ARCHIVE_LIST,
            XmodForCausalLM,
            XmodForMaskedLM,
            XmodForMultipleChoice,
            XmodForQuestionAnswering,
            XmodForSequenceClassification,
            XmodForTokenClassification,
            XmodModel,
            XmodPreTrainedModel,
        )
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
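
# How the lazy module behaves in practice (a sketch, ours; assumes transformers
# is installed with PyTorch available): importing a symbol from this package
# only loads the submodule that defines it at first attribute access, e.g.
#
#     from transformers.models.xmod import XmodConfig  # loads configuration_xmod
#     from transformers.models.xmod import XmodModel   # loads modeling_xmod lazily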
"""simple docstring"""
def _lowerCamelCase( lowercase__ , lowercase__ ) -> int:
'''simple docstring'''
while second != 0:
__lowercase= first & second
first ^= second
__lowercase= c << 1
return first
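
# Worked trace of the loop above for add(3, 5) (illustrative, ours):
#   first=0b011, second=0b101 -> carry=0b001, first=0b110, second=0b010
#   first=0b110, second=0b010 -> carry=0b010, first=0b100, second=0b100
#   first=0b100, second=0b100 -> carry=0b100, first=0b000, second=0b1000
#   first=0b000, second=0b1000 -> carry=0b000, first=0b1000, second=0b000
#   loop exits and the function returns 0b1000 == 8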
if __name__ == "__main__":
import doctest
doctest.testmod()
    first = int(input("Enter the first number: ").strip())
    second = int(input("Enter the second number: ").strip())
print(F'{add(first, second) = }')
import math
from datetime import datetime, timedelta
def gauss_easter(year: int) -> datetime:
    """Calculate the Gregorian Easter date for a given year using Gauss's method."""
    metonic_cycle = year % 19
    julian_leap_year = year % 4
    non_leap_year = year % 7
    leap_day_inhibits = math.floor(year / 100)
    lunar_orbit_correction = math.floor((13 + 8 * leap_day_inhibits) / 25)
    leap_day_reinstall_number = leap_day_inhibits / 4
    secular_moon_shift = (
        15 - lunar_orbit_correction + leap_day_inhibits - leap_day_reinstall_number
    ) % 30
    century_starting_point = (4 + leap_day_inhibits - leap_day_reinstall_number) % 7

    # days to be added to March 21
    days_to_add = (19 * metonic_cycle + secular_moon_shift) % 30

    # PHM -> Paschal Full Moon
    days_from_phm_to_sunday = (
        2 * julian_leap_year
        + 4 * non_leap_year
        + 6 * days_to_add
        + century_starting_point
    ) % 7

    if days_to_add == 29 and days_from_phm_to_sunday == 6:
        return datetime(year, 4, 19)
    elif days_to_add == 28 and days_from_phm_to_sunday == 6:
        return datetime(year, 4, 18)
    else:
        return datetime(year, 3, 22) + timedelta(
            days=int(days_to_add + days_from_phm_to_sunday)
        )
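
# Reference check (ours): Gauss's method should reproduce the known Gregorian
# Easter dates, e.g. 1994-04-03, 2000-04-23, 2010-04-04, 2021-04-04, 2023-04-09.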
if __name__ == "__main__":
    for year in (1994, 2000, 2010, 2021, 2023):
        tense = "will be" if year > datetime.now().year else "was"
print(F'Easter in {year} {tense} {gauss_easter(year)}')
from ....configuration_utils import PretrainedConfig
from ....utils import logging
logger = logging.get_logger(__name__)

# TODO: upload to AWS
RETRIBERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "yjernite/retribert-base-uncased": (
        "https://huggingface.co/yjernite/retribert-base-uncased/resolve/main/config.json"
    ),
}


class RetriBertConfig(PretrainedConfig):
    model_type = "retribert"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=8,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        share_encoders=True,
        projection_dim=128,
        pad_token_id=0,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.share_encoders = share_encoders
        self.projection_dim = projection_dim
from collections import OrderedDict
from typing import Any, Mapping, Optional
from ... import PreTrainedTokenizer
from ...configuration_utils import PretrainedConfig
from ...file_utils import TensorType, is_torch_available
from ...onnx import OnnxConfig, OnnxConfigWithPast, OnnxSeq2SeqConfigWithPast
from ...onnx.utils import compute_effective_axis_dimension
from ...utils import logging
logger = logging.get_logger(__name__)

BLENDERBOT_SMALL_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/blenderbot_small-90M": "https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/config.json",
    # See all BlenderbotSmall models at https://huggingface.co/models?filter=blenderbot_small
}


class BlenderbotSmallConfig(PretrainedConfig):
    model_type = "blenderbot-small"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}

    def __init__(
        self,
        vocab_size=50265,
        max_position_embeddings=512,
        encoder_layers=8,
        encoder_ffn_dim=2048,
        encoder_attention_heads=16,
        decoder_layers=8,
        decoder_ffn_dim=2048,
        decoder_attention_heads=16,
        encoder_layerdrop=0.0,
        decoder_layerdrop=0.0,
        use_cache=True,
        is_encoder_decoder=True,
        activation_function="gelu",
        d_model=512,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        decoder_start_token_id=1,
        scale_embedding=False,
        pad_token_id=0,
        bos_token_id=1,
        eos_token_id=2,
        forced_eos_token_id=2,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.num_hidden_layers = encoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True

        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            is_encoder_decoder=is_encoder_decoder,
            decoder_start_token_id=decoder_start_token_id,
            forced_eos_token_id=forced_eos_token_id,
            **kwargs,
        )
class BlenderbotSmallOnnxConfig(OnnxSeq2SeqConfigWithPast):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task in ["default", "seq2seq-lm"]:
            common_inputs = OrderedDict(
                [
                    ("input_ids", {0: "batch", 1: "encoder_sequence"}),
                    ("attention_mask", {0: "batch", 1: "encoder_sequence"}),
                ]
            )
            if self.use_past:
                common_inputs["decoder_input_ids"] = {0: "batch"}
                common_inputs["decoder_attention_mask"] = {0: "batch", 1: "past_decoder_sequence + sequence"}
            else:
                common_inputs["decoder_input_ids"] = {0: "batch", 1: "decoder_sequence"}
                common_inputs["decoder_attention_mask"] = {0: "batch", 1: "decoder_sequence"}

            if self.use_past:
                self.fill_with_past_key_values_(common_inputs, direction="inputs")
        elif self.task == "causal-lm":
            # TODO: figure this case out.
            common_inputs = OrderedDict(
                [
                    ("input_ids", {0: "batch", 1: "encoder_sequence"}),
                    ("attention_mask", {0: "batch", 1: "encoder_sequence"}),
                ]
            )
            if self.use_past:
                num_encoder_layers, _ = self.num_layers
                for i in range(num_encoder_layers):
                    common_inputs[f"past_key_values.{i}.key"] = {0: "batch", 2: "past_sequence + sequence"}
                    common_inputs[f"past_key_values.{i}.value"] = {0: "batch", 2: "past_sequence + sequence"}
        else:
            common_inputs = OrderedDict(
                [
                    ("input_ids", {0: "batch", 1: "encoder_sequence"}),
                    ("attention_mask", {0: "batch", 1: "encoder_sequence"}),
                    ("decoder_input_ids", {0: "batch", 1: "decoder_sequence"}),
                    ("decoder_attention_mask", {0: "batch", 1: "decoder_sequence"}),
                ]
            )

        return common_inputs

    @property
    def outputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task in ["default", "seq2seq-lm"]:
            common_outputs = super().outputs
        else:
            common_outputs = super(OnnxConfigWithPast, self).outputs
            if self.use_past:
                num_encoder_layers, _ = self.num_layers
                for i in range(num_encoder_layers):
                    common_outputs[f"present.{i}.key"] = {0: "batch", 2: "past_sequence + sequence"}
                    common_outputs[f"present.{i}.value"] = {0: "batch", 2: "past_sequence + sequence"}
        return common_outputs

    def _generate_dummy_inputs_for_default_and_seq2seq_lm(
        self,
        tokenizer: PreTrainedTokenizer,
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
    ) -> Mapping[str, Any]:
        encoder_inputs = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
            tokenizer, batch_size, seq_length, is_pair, framework
        )

        # Generate decoder inputs
        decoder_seq_length = seq_length if not self.use_past else 1
        decoder_inputs = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
            tokenizer, batch_size, decoder_seq_length, is_pair, framework
        )
        decoder_inputs = {f"decoder_{name}": tensor for name, tensor in decoder_inputs.items()}
        common_inputs = dict(**encoder_inputs, **decoder_inputs)

        if self.use_past:
            if not is_torch_available():
                raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed.")
            else:
                import torch
            batch, encoder_seq_length = common_inputs["input_ids"].shape
            decoder_seq_length = common_inputs["decoder_input_ids"].shape[1]
            num_encoder_attention_heads, num_decoder_attention_heads = self.num_attention_heads
            encoder_shape = (
                batch,
                num_encoder_attention_heads,
                encoder_seq_length,
                self._config.hidden_size // num_encoder_attention_heads,
            )
            decoder_past_length = decoder_seq_length + 3
            decoder_shape = (
                batch,
                num_decoder_attention_heads,
                decoder_past_length,
                self._config.hidden_size // num_decoder_attention_heads,
            )

            common_inputs["decoder_attention_mask"] = torch.cat(
                [common_inputs["decoder_attention_mask"], torch.ones(batch, decoder_past_length)], dim=1
            )

            common_inputs["past_key_values"] = []
            # If the number of encoder and decoder layers are present in the model configuration, both are considered
            num_encoder_layers, num_decoder_layers = self.num_layers
            min_num_layers = min(num_encoder_layers, num_decoder_layers)
            max_num_layers = max(num_encoder_layers, num_decoder_layers) - min_num_layers
            remaining_side_name = "encoder" if num_encoder_layers > num_decoder_layers else "decoder"

            for _ in range(min_num_layers):
                common_inputs["past_key_values"].append(
                    (
                        torch.zeros(decoder_shape),
                        torch.zeros(decoder_shape),
                        torch.zeros(encoder_shape),
                        torch.zeros(encoder_shape),
                    )
                )
            # TODO: test this.
            shape = encoder_shape if remaining_side_name == "encoder" else decoder_shape
            for _ in range(min_num_layers, max_num_layers):
                common_inputs["past_key_values"].append((torch.zeros(shape), torch.zeros(shape)))
        return common_inputs

    def _generate_dummy_inputs_for_causal_lm(
        self,
        tokenizer: PreTrainedTokenizer,
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
    ) -> Mapping[str, Any]:
        common_inputs = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
            tokenizer, batch_size, seq_length, is_pair, framework
        )

        if self.use_past:
            if not is_torch_available():
                raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed.")
            else:
                import torch
            batch, seqlen = common_inputs["input_ids"].shape
            # Not using the same length for past_key_values
            past_key_values_length = seqlen + 2
            num_encoder_layers, _ = self.num_layers
            num_encoder_attention_heads, _ = self.num_attention_heads
            past_shape = (
                batch,
                num_encoder_attention_heads,
                past_key_values_length,
                self._config.hidden_size // num_encoder_attention_heads,
            )

            mask_dtype = common_inputs["attention_mask"].dtype
            common_inputs["attention_mask"] = torch.cat(
                [common_inputs["attention_mask"], torch.ones(batch, past_key_values_length, dtype=mask_dtype)], dim=1
            )
            common_inputs["past_key_values"] = [
                (torch.zeros(past_shape), torch.zeros(past_shape)) for _ in range(num_encoder_layers)
            ]
        return common_inputs

    def _generate_dummy_inputs_for_sequence_classification_and_question_answering(
        self,
        tokenizer: PreTrainedTokenizer,
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
    ) -> Mapping[str, Any]:
        # Copied from OnnxConfig.generate_dummy_inputs
        # Did not use super(OnnxConfigWithPast, self).generate_dummy_inputs for code clarity.
        # If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
        batch_size = compute_effective_axis_dimension(
            batch_size, fixed_dimension=OnnxConfig.default_fixed_batch, num_token_to_add=0
        )

        # If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
        token_to_add = tokenizer.num_special_tokens_to_add(is_pair)
        seq_length = compute_effective_axis_dimension(
            seq_length, fixed_dimension=OnnxConfig.default_fixed_sequence, num_token_to_add=token_to_add
        )

        # Generate dummy inputs according to compute batch and sequence
        dummy_input = [" ".join([tokenizer.unk_token]) * seq_length] * batch_size
        common_inputs = dict(tokenizer(dummy_input, return_tensors=framework))
        return common_inputs

    def generate_dummy_inputs(
        self,
        tokenizer: PreTrainedTokenizer,
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
    ) -> Mapping[str, Any]:
        if self.task in ["default", "seq2seq-lm"]:
            common_inputs = self._generate_dummy_inputs_for_default_and_seq2seq_lm(
                tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
            )
        elif self.task == "causal-lm":
            common_inputs = self._generate_dummy_inputs_for_causal_lm(
                tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
            )
        else:
            common_inputs = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
                tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
            )
        return common_inputs

    def _flatten_past_key_values_(self, flattened_output, name, idx, t):
        if self.task in ["default", "seq2seq-lm"]:
            flattened_output = super()._flatten_past_key_values_(flattened_output, name, idx, t)
        else:
            flattened_output = super(OnnxSeq2SeqConfigWithPast, self)._flatten_past_key_values_(
                flattened_output, name, idx, t
            )
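
# Usage sketch (ours, illustrative only): generating dummy inputs with this
# ONNX config, following the transformers.onnx API of the v4.x series.
#
#     from transformers import AutoTokenizer
#     config = BlenderbotSmallConfig.from_pretrained("facebook/blenderbot_small-90M")
#     onnx_config = BlenderbotSmallOnnxConfig(config, task="seq2seq-lm")
#     tokenizer = AutoTokenizer.from_pretrained("facebook/blenderbot_small-90M")
#     dummy = onnx_config.generate_dummy_inputs(tokenizer, framework=TensorType.PYTORCH)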
import argparse
import logging
import os
import datasets
import tensorflow as tf
from transformers import AutoTokenizer
logger = logging.getLogger(__name__)


def parse_args():
    """Parse the command-line arguments for sharding the dataset into TFRecords."""
    parser = argparse.ArgumentParser(
        description="Prepare TFRecord shards from pre-tokenized samples of the wikitext dataset."
    )
    parser.add_argument(
        "--dataset_name",
        type=str,
        default="wikitext",
        help="Name of the training. Explore datasets at: hf.co/datasets.",
    )
    parser.add_argument(
        "--dataset_config", type=str, default="wikitext-103-raw-v1", help="Configuration name of the dataset."
    )
    parser.add_argument(
        "--tokenizer_name_or_path",
        type=str,
        default="sayakpaul/unigram-tokenizer-wikitext",
        help="Tokenizer identifier. Can be a local filepath or a Hub identifier.",
    )
    parser.add_argument(
        "--shard_size",
        type=int,
        default=1000,
        help="Number of entries to go in a single shard.",
    )
    parser.add_argument("--split", type=str, default="train", choices=["train", "test", "validation"])
    parser.add_argument(
        "--limit",
        default=None,
        type=int,
        help="Limit the number of shards (used for debugging).",
    )
    parser.add_argument(
        "--max_length",
        type=int,
        default=512,
        help="Maximum sequence length. For training on TPUs, it helps to have a maximum"
        " sequence length that is a multiple of 8.",
    )
    parser.add_argument(
        "--output_dir",
        default="tf-tpu",
        type=str,
        help="Output directory where the TFRecord shards will be saved. If the"
        " path is appended with `gs://` ('gs://tf-tpu', for example) then the TFRecord"
        " shards will be directly saved to a Google Cloud Storage bucket.",
    )
    args = parser.parse_args()
    return args


def tokenize_function(tokenizer):
    """Return a closure that tokenizes the `text` column of a batch of examples."""

    def fn(examples):
        return tokenizer(examples["text"])

    return fn


def get_serialized_examples(tokenized_data):
    """Serialize tokenized samples into `tf.train.Example` byte strings."""
    records = []
    for i in range(len(tokenized_data["input_ids"])):
        feature = {
            "input_ids": tf.train.Feature(int64_list=tf.train.Int64List(value=tokenized_data["input_ids"][i])),
            "attention_mask": tf.train.Feature(
                int64_list=tf.train.Int64List(value=tokenized_data["attention_mask"][i])
            ),
        }
        features = tf.train.Features(feature=feature)
        example = tf.train.Example(features=features)
        record_bytes = example.SerializeToString()
        records.append(record_bytes)
    return records


def main(args):
    dataset = datasets.load_dataset(args.dataset_name, args.dataset_config, split=args.split)

    if args.limit is not None:
        max_samples = min(len(dataset), args.limit)
        dataset = dataset.select(range(max_samples))
        print(f"Limiting the dataset to {args.limit} entries.")

    tokenizer = AutoTokenizer.from_pretrained(args.tokenizer_name_or_path)

    # Handle output directory creation.
    # For serializing into a Google Cloud Storage Bucket, one needs to first
    # create a bucket.
    if "gs" not in args.output_dir:
        if not os.path.exists(args.output_dir):
            os.makedirs(args.output_dir)
        split_dir = os.path.join(args.output_dir, args.split)
        if not os.path.exists(split_dir):
            os.makedirs(split_dir)
    else:
        split_dir = os.path.join(args.output_dir, args.split)

    # Tokenize the whole dataset at once.
    tokenize_fn = tokenize_function(tokenizer)
    dataset_tokenized = dataset.map(tokenize_fn, batched=True, num_proc=4, remove_columns=["text"])

    # We need to concatenate all our texts together, and then split the result
    # into chunks of a fixed size, which we will call block_size. To do this, we
    # will use the map method again, with the option batched=True. When we use batched=True,
    # the function we pass to map() will be passed multiple inputs at once, allowing us
    # to group them into more or fewer examples than we had in the input.
    # This allows us to create our new fixed-length samples. The advantage of this
    # method is that we don't lose a whole lot of content from the dataset compared to the
    # case where we simply tokenize with a pre-defined max_length.

    def group_texts(examples):
        # Concatenate all texts.
        concatenated_examples = {k: sum(examples[k], []) for k in examples.keys()}
        total_length = len(concatenated_examples[list(examples.keys())[0]])
        # We drop the small remainder, though you could add padding instead if the model supports it
        # In this, as in all things, we advise you to follow your heart 🫀
        total_length = (total_length // args.max_length) * args.max_length
        # Split by chunks of max_len.
        result = {
            k: [t[i : i + args.max_length] for i in range(0, total_length, args.max_length)]
            for k, t in concatenated_examples.items()
        }
        return result
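
    # Worked example of the chunking above (ours): with max_length=4 and a
    # concatenated stream of 10 token ids, total_length is floored to 8 and the
    # batch yields two samples, ids[0:4] and ids[4:8]; the last 2 ids are dropped.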
    grouped_dataset = dataset_tokenized.map(group_texts, batched=True, batch_size=1000, num_proc=4)

    shard_count = 0
    total_records = 0
    for shard in range(0, len(grouped_dataset), args.shard_size):
        dataset_snapshot = grouped_dataset[shard : shard + args.shard_size]
        records_containing = len(dataset_snapshot["input_ids"])
        filename = os.path.join(split_dir, f"dataset-{shard_count}-{records_containing}.tfrecord")
        serialized_examples = get_serialized_examples(dataset_snapshot)

        with tf.io.TFRecordWriter(filename) as out_file:
            for i in range(len(serialized_examples)):
                example = serialized_examples[i]
                out_file.write(example)
        print("Wrote file {} containing {} records".format(filename, records_containing))

        shard_count += 1
        total_records += records_containing

    with open(f"split-{args.split}-records-count.txt", "w") as f:
        print(f"Total {args.split} records: {total_records}", file=f)


if __name__ == "__main__":
    args = parse_args()
    main(args)
from math import factorial, radians
def sin(angle_in_degrees: float, accuracy: int = 18, rounded_values_count: int = 10) -> float:
    """Approximate sin(x) for an angle in degrees via its Maclaurin series."""
    # Simplify the angle to be between 360 and -360 degrees.
    angle_in_degrees = angle_in_degrees - ((angle_in_degrees // 360.0) * 360.0)

    # Converting from degrees to radians
    angle_in_radians = radians(angle_in_degrees)

    result = angle_in_radians
    a = 3
    b = -1

    for _ in range(accuracy):
        result += (b * (angle_in_radians**a)) / factorial(a)

        b = -b  # One positive term and the next will be negative and so on...
        a += 2  # Increased by 2 for every term.

    return round(result, rounded_values_count)
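
# Quick sanity checks (ours): the truncated series should track math.sin, e.g.
#   sin(90.0)  -> 1.0 (to the rounding precision)
#   sin(30.0)  -> 0.5
#   sin(180.0) -> 0.0 (up to floating-point noise near zero)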
if __name__ == "__main__":
__import__('''doctest''').testmod()
import itertools
import json
import linecache
import os
import pickle
import re
import socket
import string
from collections import Counter
from logging import getLogger
from pathlib import Path
from typing import Callable, Dict, Iterable, List
import git
import torch
from torch.utils.data import Dataset
from transformers import BartTokenizer, RagTokenizer, T5Tokenizer
def encode_line(tokenizer, line, max_length, padding_side, pad_to_max_length=True, return_tensors="pt"):
    """Tokenize a single line, padding/truncating it to `max_length`."""
    extra_kw = {"add_prefix_space": True} if isinstance(tokenizer, BartTokenizer) and not line.startswith(" ") else {}
    tokenizer.padding_side = padding_side
    return tokenizer(
        [line],
        max_length=max_length,
        padding="max_length" if pad_to_max_length else None,
        truncation=True,
        return_tensors=return_tensors,
        add_special_tokens=True,
        **extra_kw,
    )


def trim_batch(input_ids, pad_token_id, attention_mask=None):
    """Remove columns that are populated exclusively by `pad_token_id`."""
    keep_column_mask = input_ids.ne(pad_token_id).any(dim=0)
    if attention_mask is None:
        return input_ids[:, keep_column_mask]
    else:
        return (input_ids[:, keep_column_mask], attention_mask[:, keep_column_mask])
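
# Illustration of trim_batch (ours): with pad_token_id=0 and
# input_ids = [[5, 7, 0], [9, 0, 0]], only the third column is all padding, so
# the mask keeps columns 0-1 and the trimmed batch is [[5, 7], [9, 0]].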
class Seq2SeqDataset(Dataset):
    def __init__(
        self,
        tokenizer,
        data_dir,
        max_source_length,
        max_target_length,
        type_path="train",
        n_obs=None,
        src_lang=None,
        tgt_lang=None,
        prefix="",
    ):
        super().__init__()
        self.src_file = Path(data_dir).joinpath(type_path + ".source")
        self.tgt_file = Path(data_dir).joinpath(type_path + ".target")
        self.src_lens = self.get_char_lens(self.src_file)
        self.max_source_length = max_source_length
        self.max_target_length = max_target_length
        assert min(self.src_lens) > 0, f"found empty line in {self.src_file}"
        self.tokenizer = tokenizer
        self.prefix = prefix
        if n_obs is not None:
            self.src_lens = self.src_lens[:n_obs]
        self.src_lang = src_lang
        self.tgt_lang = tgt_lang

    def __len__(self):
        return len(self.src_lens)

    def __getitem__(self, index):
        index = index + 1  # linecache starts at 1
        source_line = self.prefix + linecache.getline(str(self.src_file), index).rstrip("\n")
        tgt_line = linecache.getline(str(self.tgt_file), index).rstrip("\n")
        assert source_line, f"empty source line for index {index}"
        assert tgt_line, f"empty tgt line for index {index}"

        # Need to add eos token manually for T5
        if isinstance(self.tokenizer, T5Tokenizer):
            source_line += self.tokenizer.eos_token
            tgt_line += self.tokenizer.eos_token

        # Pad source and target to the right
        source_tokenizer = (
            self.tokenizer.question_encoder if isinstance(self.tokenizer, RagTokenizer) else self.tokenizer
        )
        target_tokenizer = self.tokenizer.generator if isinstance(self.tokenizer, RagTokenizer) else self.tokenizer

        source_inputs = encode_line(source_tokenizer, source_line, self.max_source_length, "right")
        target_inputs = encode_line(target_tokenizer, tgt_line, self.max_target_length, "right")

        source_ids = source_inputs["input_ids"].squeeze()
        target_ids = target_inputs["input_ids"].squeeze()
        src_mask = source_inputs["attention_mask"].squeeze()
        return {
            "input_ids": source_ids,
            "attention_mask": src_mask,
            "decoder_input_ids": target_ids,
        }

    @staticmethod
    def get_char_lens(data_file):
        return [len(x) for x in Path(data_file).open().readlines()]

    def collate_fn(self, batch):
        input_ids = torch.stack([x["input_ids"] for x in batch])
        masks = torch.stack([x["attention_mask"] for x in batch])
        target_ids = torch.stack([x["decoder_input_ids"] for x in batch])
        tgt_pad_token_id = (
            self.tokenizer.generator.pad_token_id
            if isinstance(self.tokenizer, RagTokenizer)
            else self.tokenizer.pad_token_id
        )
        src_pad_token_id = (
            self.tokenizer.question_encoder.pad_token_id
            if isinstance(self.tokenizer, RagTokenizer)
            else self.tokenizer.pad_token_id
        )
        y = trim_batch(target_ids, tgt_pad_token_id)
        source_ids, source_mask = trim_batch(input_ids, src_pad_token_id, attention_mask=masks)
        batch = {
            "input_ids": source_ids,
            "attention_mask": source_mask,
            "decoder_input_ids": y,
        }
        return batch


logger = getLogger(__name__)


def flatten_list(summary_ids):
    """Flatten a list of lists into a single list."""
    return list(itertools.chain.from_iterable(summary_ids))


def save_git_info(folder_path) -> None:
    """Save git information to folder_path/git_log.json."""
    repo_infos = get_git_info()
    save_json(repo_infos, os.path.join(folder_path, "git_log.json"))


def save_json(content, path, indent=4, **json_dump_kwargs):
    with open(path, "w") as f:
        json.dump(content, f, indent=indent, **json_dump_kwargs)


def load_json(path):
    with open(path) as f:
        return json.load(f)


def get_git_info() -> dict:
    repo = git.Repo(search_parent_directories=True)
    repo_infos = {
        "repo_id": str(repo),
        "repo_sha": str(repo.head.object.hexsha),
        "repo_branch": str(repo.active_branch),
        "hostname": str(socket.gethostname()),
    }
    return repo_infos


def lmap(f: Callable, x: Iterable) -> List:
    """list(map(f, x))"""
    return list(map(f, x))


def pickle_save(obj, path):
    """pickle.dump(obj, path)"""
    with open(path, "wb") as f:
        return pickle.dump(obj, f)


def normalize_answer(s):
    """Lower text and remove punctuation, articles and extra whitespace."""

    def remove_articles(text):
        return re.sub(r"\b(a|an|the)\b", " ", text)

    def white_space_fix(text):
        return " ".join(text.split())

    def remove_punc(text):
        exclude = set(string.punctuation)
        return "".join(ch for ch in text if ch not in exclude)

    def lower(text):
        return text.lower()

    return white_space_fix(remove_articles(remove_punc(lower(s))))


def f1_score(prediction, ground_truth):
    """Token-level F1 between a normalized prediction and reference."""
    prediction_tokens = normalize_answer(prediction).split()
    ground_truth_tokens = normalize_answer(ground_truth).split()
    common = Counter(prediction_tokens) & Counter(ground_truth_tokens)
    num_same = sum(common.values())
    if num_same == 0:
        return 0
    precision = 1.0 * num_same / len(prediction_tokens)
    recall = 1.0 * num_same / len(ground_truth_tokens)
    f1 = (2 * precision * recall) / (precision + recall)
    return f1
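
# Worked example of the token-level F1 above (ours): prediction "a cat sat",
# reference "the cat": normalization drops the articles, leaving
# ["cat", "sat"] vs ["cat"]; num_same=1, precision=1/2, recall=1/1, so
# f1 = (2 * 0.5 * 1.0) / 1.5 = 2/3.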
def exact_match_score(prediction, ground_truth):
    return normalize_answer(prediction) == normalize_answer(ground_truth)


def calculate_exact_match(output_lns: List[str], reference_lns: List[str]) -> Dict:
    assert len(output_lns) == len(reference_lns)
    em = 0
    for hypo, pred in zip(output_lns, reference_lns):
        em += exact_match_score(hypo, pred)
    if len(output_lns) > 0:
        em /= len(output_lns)
    return {"em": em}


def is_rag_model(model_prefix):
    return model_prefix.startswith("rag")


def set_extra_model_params(extra_params, hparams, config):
    equivalent_param = {p: p for p in extra_params}
    # T5 models don't have `dropout` param, they have `dropout_rate` instead
    equivalent_param["dropout"] = "dropout_rate"
    for p in extra_params:
        if getattr(hparams, p, None):
            if not hasattr(config, p) and not hasattr(config, equivalent_param[p]):
                logger.info("config doesn't have a `{}` attribute".format(p))
                delattr(hparams, p)
                continue
            set_p = p if hasattr(config, p) else equivalent_param[p]
            setattr(config, set_p, getattr(hparams, p))
            delattr(hparams, p)
    return hparams, config
# Hand-tuned denoising timestep schedules. The variable names below are
# reconstructed from the DeepFloyd IF `timesteps.py` module in diffusers,
# which contains these same eight lists.
fast27_timesteps = [
    999, 800, 799, 600, 599, 500, 400, 399, 377, 355, 333, 311, 288, 266,
    244, 222, 200, 199, 177, 155, 133, 111, 88, 66, 44, 22, 0,
]

smart27_timesteps = [
    999, 976, 952, 928, 905, 882, 858, 857, 810, 762, 715, 714, 572, 429,
    428, 286, 285, 238, 190, 143, 142, 118, 95, 71, 47, 24, 0,
]

smart50_timesteps = [
    999, 988, 977, 966, 955, 944, 933, 922, 911, 900, 899, 879, 859, 840,
    820, 800, 799, 766, 733, 700, 699, 650, 600, 599, 500, 499, 400, 399,
    350, 300, 299, 266, 233, 200, 199, 179, 159, 140, 120, 100, 99, 88,
    77, 66, 55, 44, 33, 22, 11, 0,
]

smart100_timesteps = [
    999, 995, 992, 989, 985, 981, 978, 975, 971, 967, 964, 961, 957, 956,
    951, 947, 942, 937, 933, 928, 923, 919, 914, 913, 908, 903, 897, 892,
    887, 881, 876, 871, 870, 864, 858, 852, 846, 840, 834, 828, 827, 820,
    813, 806, 799, 792, 785, 784, 777, 770, 763, 756, 749, 742, 741, 733,
    724, 716, 707, 699, 698, 688, 677, 666, 656, 655, 645, 634, 623, 613,
    612, 598, 584, 570, 569, 555, 541, 527, 526, 505, 484, 483, 462, 440,
    439, 396, 395, 352, 351, 308, 307, 264, 263, 220, 219, 176, 132, 88,
    44, 0,
]

smart185_timesteps = [
    999, 997, 995, 992, 990, 988, 986, 984, 981, 979, 977, 975, 972, 970,
    968, 966, 964, 961, 959, 957, 956, 954, 951, 949, 946, 944, 941, 939,
    936, 934, 931, 929, 926, 924, 921, 919, 916, 914, 913, 910, 907, 905,
    902, 899, 896, 893, 891, 888, 885, 882, 879, 877, 874, 871, 870, 867,
    864, 861, 858, 855, 852, 849, 846, 843, 840, 837, 834, 831, 828, 827,
    824, 821, 817, 814, 811, 808, 804, 801, 798, 795, 791, 788, 785, 784,
    780, 777, 774, 770, 766, 763, 760, 756, 752, 749, 746, 742, 741, 737,
    733, 730, 726, 722, 718, 714, 710, 707, 703, 699, 698, 694, 690, 685,
    681, 677, 673, 669, 664, 660, 656, 655, 650, 646, 641, 636, 632, 627,
    622, 618, 613, 612, 607, 602, 596, 591, 586, 580, 575, 570, 569, 563,
    557, 551, 545, 539, 533, 527, 526, 519, 512, 505, 498, 491, 484, 483,
    474, 466, 457, 449, 440, 439, 428, 418, 407, 396, 395, 381, 366, 352,
    351, 330, 308, 307, 286, 264, 263, 242, 220, 219, 176, 175, 132, 131,
    88, 44, 0,
]

super27_timesteps = [
    999, 991, 982, 974, 966, 958, 950, 941, 933, 925, 916, 908, 900, 899,
    874, 850, 825, 800, 799, 700, 600, 500, 400, 300, 200, 100, 0,
]

super40_timesteps = [
    999, 992, 985, 978, 971, 964, 957, 949, 942, 935, 928, 921, 914, 907,
    900, 899, 879, 859, 840, 820, 800, 799, 766, 733, 700, 699, 650, 600,
    599, 500, 499, 400, 399, 300, 299, 200, 199, 100, 99, 0,
]

super100_timesteps = [
    999, 996, 992, 989, 985, 982, 979, 975, 972, 968, 965, 961, 958, 955,
    951, 948, 944, 941, 938, 934, 931, 927, 924, 920, 917, 914, 910, 907,
    903, 900, 899, 891, 884, 876, 869, 861, 853, 846, 838, 830, 823, 815,
    808, 800, 799, 788, 777, 766, 755, 744, 733, 722, 711, 700, 699, 688,
    677, 666, 655, 644, 633, 622, 611, 600, 599, 585, 571, 557, 542, 528,
    514, 500, 499, 485, 471, 457, 442, 428, 414, 400, 399, 379, 359, 340,
    320, 300, 299, 279, 259, 240, 220, 200, 199, 166, 133, 100, 99, 66,
    33, 0,
]
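
# Usage sketch (ours, illustrative call shape only): pipelines that accept an
# explicit schedule can be driven with one of these lists, e.g.
#
#     images = pipe(prompt, timesteps=fast27_timesteps).images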
"""simple docstring"""
import requests
APPID = ""  # <-- Put your OpenWeatherMap appid here!
URL_BASE = "https://api.openweathermap.org/data/2.5/"


def current_weather(q: str = "Chicago", appid: str = APPID) -> dict:
    """Return the current weather for a location, as parsed JSON."""
    return requests.get(URL_BASE + "weather", params=locals()).json()


def weather_forecast(q: str = "Kolkata, India", appid: str = APPID) -> dict:
    """Return the weather forecast for a location, as parsed JSON."""
    return requests.get(URL_BASE + "forecast", params=locals()).json()


def weather_onecall(lat: float = 55.68, lon: float = 12.57, appid: str = APPID) -> dict:
    """Return the "one call" weather data for a pair of coordinates, as parsed JSON."""
    return requests.get(URL_BASE + "onecall", params=locals()).json()
if __name__ == "__main__":
from pprint import pprint
while True:
        location = input("Enter a location: ").strip()
if location:
pprint(current_weather(location))
else:
break
from __future__ import annotations
import numpy as np
def relu(vector: list[float]) -> np.ndarray:
    """Apply the ReLU activation, max(0, x), elementwise."""
    return np.maximum(0, vector)
if __name__ == "__main__":
print(np.array(relu([-1, 0, 5]))) # --> [0, 0, 5]
import argparse
import torch
from transformers import (
EncodecConfig,
EncodecFeatureExtractor,
EncodecModel,
logging,
)
# checkpoints downloaded from:
# https://dl.fbaipublicfiles.com/encodec/v0/encodec_24khz-d7cc33bc.th
# https://huggingface.co/facebook/musicgen-small/resolve/main/compression_state_dict.bin
# https://dl.fbaipublicfiles.com/encodec/v0/encodec_48khz-7e698e3e.th
logging.set_verbosity_info()
logger = logging.get_logger("transformers.models.encodec")

MAPPING_QUANTIZER = {
'''quantizer.vq.layers.*._codebook.inited''': '''quantizer.layers.*.codebook.inited''',
'''quantizer.vq.layers.*._codebook.cluster_size''': '''quantizer.layers.*.codebook.cluster_size''',
'''quantizer.vq.layers.*._codebook.embed''': '''quantizer.layers.*.codebook.embed''',
'''quantizer.vq.layers.*._codebook.embed_avg''': '''quantizer.layers.*.codebook.embed_avg''',
}
MAPPING_ENCODER = {
'''encoder.model.0.conv.conv''': '''encoder.layers.0.conv''',
'''encoder.model.1.block.1.conv.conv''': '''encoder.layers.1.block.1.conv''',
'''encoder.model.1.block.3.conv.conv''': '''encoder.layers.1.block.3.conv''',
'''encoder.model.1.shortcut.conv.conv''': '''encoder.layers.1.shortcut.conv''',
'''encoder.model.3.conv.conv''': '''encoder.layers.3.conv''',
'''encoder.model.4.block.1.conv.conv''': '''encoder.layers.4.block.1.conv''',
'''encoder.model.4.block.3.conv.conv''': '''encoder.layers.4.block.3.conv''',
'''encoder.model.4.shortcut.conv.conv''': '''encoder.layers.4.shortcut.conv''',
'''encoder.model.6.conv.conv''': '''encoder.layers.6.conv''',
'''encoder.model.7.block.1.conv.conv''': '''encoder.layers.7.block.1.conv''',
'''encoder.model.7.block.3.conv.conv''': '''encoder.layers.7.block.3.conv''',
'''encoder.model.7.shortcut.conv.conv''': '''encoder.layers.7.shortcut.conv''',
'''encoder.model.9.conv.conv''': '''encoder.layers.9.conv''',
'''encoder.model.10.block.1.conv.conv''': '''encoder.layers.10.block.1.conv''',
'''encoder.model.10.block.3.conv.conv''': '''encoder.layers.10.block.3.conv''',
'''encoder.model.10.shortcut.conv.conv''': '''encoder.layers.10.shortcut.conv''',
'''encoder.model.12.conv.conv''': '''encoder.layers.12.conv''',
'''encoder.model.13.lstm''': '''encoder.layers.13.lstm''',
'''encoder.model.15.conv.conv''': '''encoder.layers.15.conv''',
}
MAPPING_ENCODER_48K = {
'''encoder.model.0.conv.norm''': '''encoder.layers.0.norm''',
'''encoder.model.1.block.1.conv.norm''': '''encoder.layers.1.block.1.norm''',
'''encoder.model.1.block.3.conv.norm''': '''encoder.layers.1.block.3.norm''',
'''encoder.model.1.shortcut.conv.norm''': '''encoder.layers.1.shortcut.norm''',
'''encoder.model.3.conv.norm''': '''encoder.layers.3.norm''',
'''encoder.model.4.block.1.conv.norm''': '''encoder.layers.4.block.1.norm''',
'''encoder.model.4.block.3.conv.norm''': '''encoder.layers.4.block.3.norm''',
'''encoder.model.4.shortcut.conv.norm''': '''encoder.layers.4.shortcut.norm''',
'''encoder.model.6.conv.norm''': '''encoder.layers.6.norm''',
'''encoder.model.7.block.1.conv.norm''': '''encoder.layers.7.block.1.norm''',
'''encoder.model.7.block.3.conv.norm''': '''encoder.layers.7.block.3.norm''',
'''encoder.model.7.shortcut.conv.norm''': '''encoder.layers.7.shortcut.norm''',
'''encoder.model.9.conv.norm''': '''encoder.layers.9.norm''',
'''encoder.model.10.block.1.conv.norm''': '''encoder.layers.10.block.1.norm''',
'''encoder.model.10.block.3.conv.norm''': '''encoder.layers.10.block.3.norm''',
'''encoder.model.10.shortcut.conv.norm''': '''encoder.layers.10.shortcut.norm''',
'''encoder.model.12.conv.norm''': '''encoder.layers.12.norm''',
'''encoder.model.15.conv.norm''': '''encoder.layers.15.norm''',
}
MAPPING_DECODER = {
'''decoder.model.0.conv.conv''': '''decoder.layers.0.conv''',
'''decoder.model.1.lstm''': '''decoder.layers.1.lstm''',
'''decoder.model.3.convtr.convtr''': '''decoder.layers.3.conv''',
'''decoder.model.4.block.1.conv.conv''': '''decoder.layers.4.block.1.conv''',
'''decoder.model.4.block.3.conv.conv''': '''decoder.layers.4.block.3.conv''',
'''decoder.model.4.shortcut.conv.conv''': '''decoder.layers.4.shortcut.conv''',
'''decoder.model.6.convtr.convtr''': '''decoder.layers.6.conv''',
'''decoder.model.7.block.1.conv.conv''': '''decoder.layers.7.block.1.conv''',
'''decoder.model.7.block.3.conv.conv''': '''decoder.layers.7.block.3.conv''',
'''decoder.model.7.shortcut.conv.conv''': '''decoder.layers.7.shortcut.conv''',
'''decoder.model.9.convtr.convtr''': '''decoder.layers.9.conv''',
'''decoder.model.10.block.1.conv.conv''': '''decoder.layers.10.block.1.conv''',
'''decoder.model.10.block.3.conv.conv''': '''decoder.layers.10.block.3.conv''',
'''decoder.model.10.shortcut.conv.conv''': '''decoder.layers.10.shortcut.conv''',
'''decoder.model.12.convtr.convtr''': '''decoder.layers.12.conv''',
'''decoder.model.13.block.1.conv.conv''': '''decoder.layers.13.block.1.conv''',
'''decoder.model.13.block.3.conv.conv''': '''decoder.layers.13.block.3.conv''',
'''decoder.model.13.shortcut.conv.conv''': '''decoder.layers.13.shortcut.conv''',
'''decoder.model.15.conv.conv''': '''decoder.layers.15.conv''',
}
MAPPING_DECODER_48K = {
'''decoder.model.0.conv.norm''': '''decoder.layers.0.norm''',
'''decoder.model.3.convtr.norm''': '''decoder.layers.3.norm''',
'''decoder.model.4.block.1.conv.norm''': '''decoder.layers.4.block.1.norm''',
'''decoder.model.4.block.3.conv.norm''': '''decoder.layers.4.block.3.norm''',
'''decoder.model.4.shortcut.conv.norm''': '''decoder.layers.4.shortcut.norm''',
'''decoder.model.6.convtr.norm''': '''decoder.layers.6.norm''',
'''decoder.model.7.block.1.conv.norm''': '''decoder.layers.7.block.1.norm''',
'''decoder.model.7.block.3.conv.norm''': '''decoder.layers.7.block.3.norm''',
'''decoder.model.7.shortcut.conv.norm''': '''decoder.layers.7.shortcut.norm''',
'''decoder.model.9.convtr.norm''': '''decoder.layers.9.norm''',
'''decoder.model.10.block.1.conv.norm''': '''decoder.layers.10.block.1.norm''',
'''decoder.model.10.block.3.conv.norm''': '''decoder.layers.10.block.3.norm''',
'''decoder.model.10.shortcut.conv.norm''': '''decoder.layers.10.shortcut.norm''',
'''decoder.model.12.convtr.norm''': '''decoder.layers.12.norm''',
'''decoder.model.13.block.1.conv.norm''': '''decoder.layers.13.block.1.norm''',
'''decoder.model.13.block.3.conv.norm''': '''decoder.layers.13.block.3.norm''',
'''decoder.model.13.shortcut.conv.norm''': '''decoder.layers.13.shortcut.norm''',
'''decoder.model.15.conv.norm''': '''decoder.layers.15.norm''',
}
MAPPING_24K = {
**MAPPING_QUANTIZER,
**MAPPING_ENCODER,
**MAPPING_DECODER,
}
MAPPING_48K = {
**MAPPING_QUANTIZER,
**MAPPING_ENCODER,
**MAPPING_ENCODER_48K,
**MAPPING_DECODER,
**MAPPING_DECODER_48K,
}
TOP_LEVEL_KEYS = []
IGNORE_KEYS = []


def set_recursively(hf_pointer, key, value, full_name, weight_type):
    """Copy `value` into the attribute of `hf_pointer` addressed by `key` and `weight_type`."""
    for attribute in key.split("."):
        hf_pointer = getattr(hf_pointer, attribute)

    if weight_type is not None:
        hf_shape = getattr(hf_pointer, weight_type).shape
    else:
        hf_shape = hf_pointer.shape

    if hf_shape != value.shape:
        raise ValueError(
            f"Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"
            f" {value.shape} for {full_name}"
        )

    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    elif weight_type == "running_mean":
        hf_pointer.running_mean.data = value
    elif weight_type == "running_var":
        hf_pointer.running_var.data = value
    elif weight_type == "num_batches_tracked":
        hf_pointer.num_batches_tracked.data = value
    elif weight_type == "weight_ih_l0":
        hf_pointer.weight_ih_l0.data = value
    elif weight_type == "weight_hh_l0":
        hf_pointer.weight_hh_l0.data = value
    elif weight_type == "bias_ih_l0":
        hf_pointer.bias_ih_l0.data = value
    elif weight_type == "bias_hh_l0":
        hf_pointer.bias_hh_l0.data = value
    elif weight_type == "weight_ih_l1":
        hf_pointer.weight_ih_l1.data = value
    elif weight_type == "weight_hh_l1":
        hf_pointer.weight_hh_l1.data = value
    elif weight_type == "bias_ih_l1":
        hf_pointer.bias_ih_l1.data = value
    elif weight_type == "bias_hh_l1":
        hf_pointer.bias_hh_l1.data = value
    else:
        hf_pointer.data = value

    logger.info(f"{key + ('.' + weight_type if weight_type is not None else '')} was initialized from {full_name}.")


def should_ignore(name, ignore_keys):
    """Return True if `name` matches one of the (possibly wildcarded) ignore patterns."""
    for key in ignore_keys:
        if key.endswith(".*"):
            if name.startswith(key[:-1]):
                return True
        elif ".*." in key:
            prefix, suffix = key.split(".*.")
            if prefix in name and suffix in name:
                return True
        elif key in name:
            return True
    return False
def recursively_load_weights(orig_dict, hf_model, model_name):
    """Map every tensor in the original state dict onto the HF EncodecModel."""
    unused_weights = []

    # NOTE: the upstream script tested `model_name == "encodec_24khz" or "encodec_32khz"`,
    # which is always truthy; the membership test below is the intended check.
    if model_name in ("encodec_24khz", "encodec_32khz"):
        MAPPING = MAPPING_24K
    elif model_name == "encodec_48khz":
        MAPPING = MAPPING_48K
    else:
        raise ValueError(f"Unsupported model: {model_name}")

    for name, value in orig_dict.items():
        if should_ignore(name, IGNORE_KEYS):
            logger.info(f"{name} was ignored")
            continue

        is_used = False
        for key, mapped_key in MAPPING.items():
            if "*" in key:
                prefix, suffix = key.split(".*.")
                if prefix in name and suffix in name:
                    key = suffix

            if key in name:
                # HACK otherwise .embed gets initialized with .embed_avg too
                if key.endswith("embed") and name.endswith("embed_avg"):
                    continue

                is_used = True
                if "*" in mapped_key:
                    layer_index = name.split(key)[0].split(".")[-2]
                    mapped_key = mapped_key.replace("*", layer_index)
                if "weight_g" in name:
                    weight_type = "weight_g"
                elif "weight_v" in name:
                    weight_type = "weight_v"
                elif "weight_ih_l0" in name:
                    weight_type = "weight_ih_l0"
                elif "weight_hh_l0" in name:
                    weight_type = "weight_hh_l0"
                elif "bias_ih_l0" in name:
                    weight_type = "bias_ih_l0"
                elif "bias_hh_l0" in name:
                    weight_type = "bias_hh_l0"
                elif "weight_ih_l1" in name:
                    weight_type = "weight_ih_l1"
                elif "weight_hh_l1" in name:
                    weight_type = "weight_hh_l1"
                elif "bias_ih_l1" in name:
                    weight_type = "bias_ih_l1"
                elif "bias_hh_l1" in name:
                    weight_type = "bias_hh_l1"
                elif "bias" in name:
                    weight_type = "bias"
                elif "weight" in name:
                    weight_type = "weight"
                elif "running_mean" in name:
                    weight_type = "running_mean"
                elif "running_var" in name:
                    weight_type = "running_var"
                elif "num_batches_tracked" in name:
                    weight_type = "num_batches_tracked"
                else:
                    weight_type = None
                set_recursively(hf_model, mapped_key, value, name, weight_type)
            continue
        if not is_used:
            unused_weights.append(name)

    logger.warning(f"Unused weights: {unused_weights}")
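
# How the wildcard mapping above resolves (ours): for the original tensor name
# "quantizer.vq.layers.3._codebook.inited" and the pattern
# "quantizer.vq.layers.*._codebook.inited", the layer index "3" is extracted
# from the text before the suffix and substituted into
# "quantizer.layers.*.codebook.inited", giving
# "quantizer.layers.3.codebook.inited" as the HF parameter path.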
@torch.no_grad()
def convert_checkpoint(
    model_name,
    checkpoint_path,
    pytorch_dump_folder_path,
    config_path=None,
    repo_id=None,
):
    """Convert an original EnCodec checkpoint into the transformers format."""
    if config_path is not None:
        config = EncodecConfig.from_pretrained(config_path)
    else:
        config = EncodecConfig()

    if model_name == "encodec_24khz":
        pass  # config is already correct
    elif model_name == "encodec_32khz":
        config.upsampling_ratios = [8, 5, 4, 4]
        config.target_bandwidths = [2.2]
        config.num_filters = 64
        config.sampling_rate = 32_000
        config.codebook_size = 2048
        config.use_causal_conv = False
        config.normalize = False
        config.use_conv_shortcut = False
    elif model_name == "encodec_48khz":
        config.upsampling_ratios = [8, 5, 4, 2]
        config.target_bandwidths = [3.0, 6.0, 12.0, 24.0]
        config.sampling_rate = 48_000
        config.audio_channels = 2
        config.use_causal_conv = False
        config.norm_type = "time_group_norm"
        config.normalize = True
        config.chunk_length_s = 1.0
        config.overlap = 0.01
    else:
        raise ValueError(f"Unknown model name: {model_name}")

    model = EncodecModel(config)

    feature_extractor = EncodecFeatureExtractor(
        feature_size=config.audio_channels,
        sampling_rate=config.sampling_rate,
        chunk_length_s=config.chunk_length_s,
        overlap=config.overlap,
    )
    feature_extractor.save_pretrained(pytorch_dump_folder_path)

    original_checkpoint = torch.load(checkpoint_path)
    if "best_state" in original_checkpoint:
        # we might have a training state saved, in which case discard the yaml results and just retain the weights
        original_checkpoint = original_checkpoint["best_state"]
    recursively_load_weights(original_checkpoint, model, model_name)
    model.save_pretrained(pytorch_dump_folder_path)

    if repo_id:
        print("Pushing to the hub...")
        feature_extractor.push_to_hub(repo_id)
        model.push_to_hub(repo_id)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
'''--model''',
default='''encodec_24khz''',
type=str,
help='''The model to convert. Should be one of \'encodec_24khz\', \'encodec_32khz\', \'encodec_48khz\'.''',
)
parser.add_argument('''--checkpoint_path''', required=True, default=None, type=str, help='''Path to original checkpoint''')
parser.add_argument('''--config_path''', default=None, type=str, help='''Path to hf config.json of model to convert''')
parser.add_argument(
'''--pytorch_dump_folder_path''', required=True, default=None, type=str, help='''Path to the output PyTorch model.'''
)
parser.add_argument(
'''--push_to_hub''', default=None, type=str, help='''Where to upload the converted model on the 🤗 hub.'''
)
    args = parser.parse_args()
convert_checkpoint(
args.model,
args.checkpoint_path,
args.pytorch_dump_folder_path,
args.config_path,
args.push_to_hub,
)
def solution(power: int = 1000) -> int:
    """Return the sum of the decimal digits of 2**power (Project Euler 16)."""
    num = 2**power
    string_num = str(num)
    list_num = list(string_num)
    sum_of_num = 0

    for i in list_num:
        sum_of_num += int(i)

    return sum_of_num
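
# Quick checks (ours): 2**15 = 32768, whose digits sum to 26, so
# solution(15) == 26; the published Project Euler answer for power=1000 is 1366.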
if __name__ == "__main__":
    power = int(input("Enter the power of 2: ").strip())
print('''2 ^ ''', power, ''' = ''', 2**power)
    result = solution(power)
print('''Sum of the digits is: ''', result)
import argparse
import pytorch_lightning as pl
import torch
from torch import nn
from transformers import LongformerForQuestionAnswering, LongformerModel
class LightningModel(pl.LightningModule):
    def __init__(self, model):
        super().__init__()
        self.model = model
        self.num_labels = 2
        self.qa_outputs = nn.Linear(self.model.config.hidden_size, self.num_labels)

    # implemented only because Lightning requires a forward; unused in the conversion
    def forward(self):
        pass


def convert_longformer_qa_checkpoint_to_pytorch(
    longformer_model: str, longformer_question_answering_ckpt_path: str, pytorch_dump_folder_path: str
):
    """Transfer a PyTorch Lightning QA checkpoint onto a LongformerForQuestionAnswering model."""
    # load longformer model from model identifier
    longformer = LongformerModel.from_pretrained(longformer_model)
    lightning_model = LightningModel(longformer)

    ckpt = torch.load(longformer_question_answering_ckpt_path, map_location=torch.device("cpu"))
    lightning_model.load_state_dict(ckpt["state_dict"])

    # init longformer question answering model
    longformer_for_qa = LongformerForQuestionAnswering.from_pretrained(longformer_model)

    # transfer weights
    longformer_for_qa.longformer.load_state_dict(lightning_model.model.state_dict())
    longformer_for_qa.qa_outputs.load_state_dict(lightning_model.qa_outputs.state_dict())
    longformer_for_qa.eval()

    # save model
    longformer_for_qa.save_pretrained(pytorch_dump_folder_path)

    print(f"Conversion successful. Model saved under {pytorch_dump_folder_path}")
if __name__ == "__main__":
lowerCAmelCase = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--longformer_model''',
default=None,
type=str,
required=True,
help='''model identifier of longformer. Should be either `longformer-base-4096` or `longformer-large-4096`.''',
)
parser.add_argument(
'''--longformer_question_answering_ckpt_path''',
default=None,
type=str,
required=True,
help='''Path the official PyTorch Lightning Checkpoint.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
lowerCAmelCase = parser.parse_args()
convert_longformer_qa_checkpoint_to_pytorch(
args.longformer_model, args.longformer_question_answering_ckpt_path, args.pytorch_dump_folder_path
)
import logging
import os
from dataclasses import dataclass, field
from typing import Dict, Optional
import datasets
import numpy as np
import tensorflow as tf
from transformers import (
AutoConfig,
AutoTokenizer,
EvalPrediction,
HfArgumentParser,
PreTrainedTokenizer,
TFAutoModelForSequenceClassification,
TFTrainer,
TFTrainingArguments,
)
from transformers.utils import logging as hf_logging
hf_logging.set_verbosity_info()
hf_logging.enable_default_handler()
hf_logging.enable_explicit_format()
def get_tfds(
    train_file: str,
    eval_file: str,
    test_file: str,
    tokenizer: PreTrainedTokenizer,
    label_column_id: int,
    max_seq_length: Optional[int] = None,
):
    """Load CSV files into `tf.data.Dataset` objects for training, validation and test."""
    files = {}
    if train_file is not None:
        files[datasets.Split.TRAIN] = [train_file]
    if eval_file is not None:
        files[datasets.Split.VALIDATION] = [eval_file]
    if test_file is not None:
        files[datasets.Split.TEST] = [test_file]

    ds = datasets.load_dataset("csv", data_files=files)
    features_name = list(ds[list(files.keys())[0]].features.keys())
    label_name = features_name.pop(label_column_id)
    label_list = list(set(ds[list(files.keys())[0]][label_name]))
    label2id = {label: i for i, label in enumerate(label_list)}
    input_names = tokenizer.model_input_names
    transformed_ds = {}

    if len(features_name) == 1:
        for k in files.keys():
            transformed_ds[k] = ds[k].map(
                lambda example: tokenizer.batch_encode_plus(
                    example[features_name[0]], truncation=True, max_length=max_seq_length, padding="max_length"
                ),
                batched=True,
            )
    elif len(features_name) == 2:
        for k in files.keys():
            transformed_ds[k] = ds[k].map(
                lambda example: tokenizer.batch_encode_plus(
                    (example[features_name[0]], example[features_name[1]]),
                    truncation=True,
                    max_length=max_seq_length,
                    padding="max_length",
                ),
                batched=True,
            )

    def gen_train():
        for ex in transformed_ds[datasets.Split.TRAIN]:
            d = {k: v for k, v in ex.items() if k in input_names}
            label = label2id[ex[label_name]]
            yield (d, label)

    def gen_val():
        for ex in transformed_ds[datasets.Split.VALIDATION]:
            d = {k: v for k, v in ex.items() if k in input_names}
            label = label2id[ex[label_name]]
            yield (d, label)

    def gen_test():
        for ex in transformed_ds[datasets.Split.TEST]:
            d = {k: v for k, v in ex.items() if k in input_names}
            label = label2id[ex[label_name]]
            yield (d, label)

    train_ds = (
        tf.data.Dataset.from_generator(
            gen_train,
            ({k: tf.int32 for k in input_names}, tf.int64),
            ({k: tf.TensorShape([None]) for k in input_names}, tf.TensorShape([])),
        )
        if datasets.Split.TRAIN in transformed_ds
        else None
    )
    if train_ds is not None:
        train_ds = train_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.TRAIN])))

    val_ds = (
        tf.data.Dataset.from_generator(
            gen_val,
            ({k: tf.int32 for k in input_names}, tf.int64),
            ({k: tf.TensorShape([None]) for k in input_names}, tf.TensorShape([])),
        )
        if datasets.Split.VALIDATION in transformed_ds
        else None
    )
    if val_ds is not None:
        val_ds = val_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.VALIDATION])))

    test_ds = (
        tf.data.Dataset.from_generator(
            gen_test,
            ({k: tf.int32 for k in input_names}, tf.int64),
            ({k: tf.TensorShape([None]) for k in input_names}, tf.TensorShape([])),
        )
        if datasets.Split.TEST in transformed_ds
        else None
    )
    if test_ds is not None:
        test_ds = test_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.TEST])))

    return train_ds, val_ds, test_ds, label2id
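# Minimal usage sketch for `get_tfds` (added; file names and the tokenizer
# checkpoint are hypothetical). The CSVs are assumed to contain one or two text
# columns plus a label column addressed by index via `label_column_id`:
#
#   tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
#   train_ds, val_ds, test_ds, label2id = get_tfds(
#       train_file="train.csv",
#       eval_file="dev.csv",
#       test_file=None,
#       tokenizer=tokenizer,
#       label_column_id=0,
#       max_seq_length=128,
#   )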
logger = logging.getLogger(__name__)


@dataclass
class DataTrainingArguments:
    label_column_id: int = field(metadata={"help": "Which column contains the label"})
    train_file: str = field(default=None, metadata={"help": "The path of the training file"})
    dev_file: Optional[str] = field(default=None, metadata={"help": "The path of the development file"})
    test_file: Optional[str] = field(default=None, metadata={"help": "The path of the test file"})
    max_seq_length: int = field(
        default=128,
        metadata={
            "help": (
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        },
    )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached training and evaluation sets"}
    )


@dataclass
class ModelArguments:
    model_name_or_path: str = field(
        metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"}
    )
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
    )
    tokenizer_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"}
    )
    use_fast: bool = field(default=False, metadata={"help": "Set this flag to use fast tokenization."})
    # If you want to tweak more attributes on your tokenizer, you should do it in a distinct script,
    # or just modify its tokenizer_config.json.
    cache_dir: Optional[str] = field(
        default=None, metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"}
    )
def main():
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TFTrainingArguments))
    model_args, data_args, training_args = parser.parse_args_into_dataclasses()

    if (
        os.path.exists(training_args.output_dir)
        and os.listdir(training_args.output_dir)
        and training_args.do_train
        and not training_args.overwrite_output_dir
    ):
        raise ValueError(
            f"Output directory ({training_args.output_dir}) already exists and is not empty. Use"
            " --overwrite_output_dir to overcome."
        )

    # Setup logging
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        level=logging.INFO,
    )
    logger.info(
        f"n_replicas: {training_args.n_replicas}, distributed training: {bool(training_args.n_replicas > 1)}, "
        f"16-bits training: {training_args.fp16}"
    )
    logger.info(f"Training/evaluation parameters {training_args}")

    # Load pretrained model and tokenizer
    #
    # Distributed training:
    # The .from_pretrained methods guarantee that only one local process can concurrently
    # download model & vocab.
    tokenizer = AutoTokenizer.from_pretrained(
        model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path,
        cache_dir=model_args.cache_dir,
    )
    train_dataset, eval_dataset, test_ds, label2id = get_tfds(
        train_file=data_args.train_file,
        eval_file=data_args.dev_file,
        test_file=data_args.test_file,
        tokenizer=tokenizer,
        label_column_id=data_args.label_column_id,
        max_seq_length=data_args.max_seq_length,
    )
    config = AutoConfig.from_pretrained(
        model_args.config_name if model_args.config_name else model_args.model_name_or_path,
        num_labels=len(label2id),
        label2id=label2id,
        id2label={id: label for label, id in label2id.items()},
        finetuning_task="text-classification",
        cache_dir=model_args.cache_dir,
    )
    with training_args.strategy.scope():
        model = TFAutoModelForSequenceClassification.from_pretrained(
            model_args.model_name_or_path,
            from_pt=bool(".bin" in model_args.model_name_or_path),
            config=config,
            cache_dir=model_args.cache_dir,
        )

    def compute_metrics(p: EvalPrediction) -> Dict:
        preds = np.argmax(p.predictions, axis=1)
        return {"acc": (preds == p.label_ids).mean()}

    # Initialize our Trainer
    trainer = TFTrainer(
        model=model,
        args=training_args,
        train_dataset=train_dataset,
        eval_dataset=eval_dataset,
        compute_metrics=compute_metrics,
    )

    # Training
    if training_args.do_train:
        trainer.train()
        trainer.save_model()
        tokenizer.save_pretrained(training_args.output_dir)

    # Evaluation
    results = {}
    if training_args.do_eval:
        logger.info("*** Evaluate ***")
        result = trainer.evaluate()
        output_eval_file = os.path.join(training_args.output_dir, "eval_results.txt")
        with open(output_eval_file, "w") as writer:
            logger.info("***** Eval results *****")
            for key, value in result.items():
                logger.info(f"  {key} = {value}")
                writer.write(f"{key} = {value}\n")
            results.update(result)

    return results
if __name__ == "__main__":
main()
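# A plausible command line for this script, assembled from the argument
# dataclasses above (data files and output directory are hypothetical):
#
#   python run_tf_text_classification.py \
#       --model_name_or_path bert-base-cased \
#       --train_file train.csv --dev_file dev.csv \
#       --label_column_id 0 --max_seq_length 128 \
#       --output_dir ./tf-text-clf --do_train --do_eval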
import math
def solution(n: int = 100) -> int:
    """Return the difference between the square of the sum and the sum of the
    squares of the first ``n`` natural numbers."""
    sum_of_squares = sum(i * i for i in range(1, n + 1))
    square_of_sum = int(math.pow(sum(range(1, n + 1)), 2))
    return square_of_sum - sum_of_squares
if __name__ == "__main__":
print(F'{solution() = }')
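# Added sanity check: the same value via the closed forms
# sum(i) = n(n+1)/2 and sum(i^2) = n(n+1)(2n+1)/6.
if __name__ == "__main__":
    n = 100
    closed_form = (n * (n + 1) // 2) ** 2 - n * (n + 1) * (2 * n + 1) // 6
    assert solution(n) == closed_form == 25164150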
import inspect
import unittest
from math import floor
from transformers import CvtConfig
from transformers.file_utils import cached_property, is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import CvtForImageClassification, CvtModel
from transformers.models.cvt.modeling_cvt import CVT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class CvtConfigTester(ConfigTester):
    def create_and_test_config_common_properties(self):
        config = self.config_class(**self.inputs_dict)
        self.parent.assertTrue(hasattr(config, "embed_dim"))
        self.parent.assertTrue(hasattr(config, "num_heads"))
class CvtModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=64,
        num_channels=3,
        embed_dim=[16, 48, 96],
        num_heads=[1, 3, 6],
        depth=[1, 2, 10],
        patch_sizes=[7, 3, 3],
        patch_stride=[4, 2, 2],
        patch_padding=[2, 1, 1],
        stride_kv=[2, 2, 2],
        cls_token=[False, False, True],
        attention_drop_rate=[0.0, 0.0, 0.0],
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        is_training=True,
        use_labels=True,
        num_labels=2,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_sizes = patch_sizes
        self.patch_stride = patch_stride
        self.patch_padding = patch_padding
        self.is_training = is_training
        self.use_labels = use_labels
        self.num_labels = num_labels
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.num_heads = num_heads
        self.stride_kv = stride_kv
        self.depth = depth
        self.cls_token = cls_token
        self.attention_drop_rate = attention_drop_rate
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)
        config = self.get_config()
        return config, pixel_values, labels

    def get_config(self):
        return CvtConfig(
            image_size=self.image_size,
            num_labels=self.num_labels,
            num_channels=self.num_channels,
            embed_dim=self.embed_dim,
            num_heads=self.num_heads,
            patch_sizes=self.patch_sizes,
            patch_padding=self.patch_padding,
            patch_stride=self.patch_stride,
            stride_kv=self.stride_kv,
            depth=self.depth,
            cls_token=self.cls_token,
            attention_drop_rate=self.attention_drop_rate,
            initializer_range=self.initializer_range,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = CvtModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        image_size = (self.image_size, self.image_size)
        height, width = image_size[0], image_size[1]
        for i in range(len(self.depth)):
            height = floor(((height + 2 * self.patch_padding[i] - self.patch_sizes[i]) / self.patch_stride[i]) + 1)
            width = floor(((width + 2 * self.patch_padding[i] - self.patch_sizes[i]) / self.patch_stride[i]) + 1)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.embed_dim[-1], height, width))

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.num_labels
        model = CvtForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class CvtModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (CvtModel, CvtForImageClassification) if is_torch_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": CvtModel, "image-classification": CvtForImageClassification}
        if is_torch_available()
        else {}
    )

    test_pruning = False
    test_torchscript = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False

    def setUp(self):
        self.model_tester = CvtModelTester(self)
        self.config_tester = ConfigTester(self, config_class=CvtConfig, has_text_modality=False, hidden_size=37)
    def test_config(self):
        self.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()

    def create_and_test_config_common_properties(self):
        return

    @unittest.skip(reason="Cvt does not output attentions")
    def test_attention_outputs(self):
        pass

    @unittest.skip(reason="Cvt does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="Cvt does not support input and output embeddings")
    def test_model_common_attributes(self):
        pass

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            hidden_states = outputs.hidden_states

            expected_num_layers = len(self.model_tester.depth)
            self.assertEqual(len(hidden_states), expected_num_layers)

            # verify the first hidden states (first block)
            self.assertListEqual(
                list(hidden_states[0].shape[-3:]),
                [
                    self.model_tester.embed_dim[0],
                    self.model_tester.image_size // 4,
                    self.model_tester.image_size // 4,
                ],
            )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            check_hidden_states_output(inputs_dict, config, model_class)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    @unittest.skip("Will be fixed soon by reducing the size of the model used for common tests.")
    def test_model_is_small(self):
        pass

    @slow
    def test_model_from_pretrained(self):
        for model_name in CVT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = CvtModel.from_pretrained(model_name)
            self.assertIsNotNone(model)


def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image


@require_torch
@require_vision
class CvtModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return AutoImageProcessor.from_pretrained(CVT_PRETRAINED_MODEL_ARCHIVE_LIST[0])

    @slow
    def test_inference_image_classification_head(self):
        model = CvtForImageClassification.from_pretrained(CVT_PRETRAINED_MODEL_ARCHIVE_LIST[0]).to(torch_device)
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor([0.9285, 0.9015, -0.3150]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
'''configuration_mobilebert''': [
'''MOBILEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''MobileBertConfig''',
'''MobileBertOnnxConfig''',
],
'''tokenization_mobilebert''': ['''MobileBertTokenizer'''],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_mobilebert_fast"] = ["MobileBertTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_mobilebert"] = [
'''MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''MobileBertForMaskedLM''',
'''MobileBertForMultipleChoice''',
'''MobileBertForNextSentencePrediction''',
'''MobileBertForPreTraining''',
'''MobileBertForQuestionAnswering''',
'''MobileBertForSequenceClassification''',
'''MobileBertForTokenClassification''',
'''MobileBertLayer''',
'''MobileBertModel''',
'''MobileBertPreTrainedModel''',
'''load_tf_weights_in_mobilebert''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_mobilebert"] = [
'''TF_MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFMobileBertForMaskedLM''',
'''TFMobileBertForMultipleChoice''',
'''TFMobileBertForNextSentencePrediction''',
'''TFMobileBertForPreTraining''',
'''TFMobileBertForQuestionAnswering''',
'''TFMobileBertForSequenceClassification''',
'''TFMobileBertForTokenClassification''',
'''TFMobileBertMainLayer''',
'''TFMobileBertModel''',
'''TFMobileBertPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_mobilebert import (
MOBILEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
MobileBertConfig,
MobileBertOnnxConfig,
)
from .tokenization_mobilebert import MobileBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_mobilebert_fast import MobileBertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mobilebert import (
MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
MobileBertForMaskedLM,
MobileBertForMultipleChoice,
MobileBertForNextSentencePrediction,
MobileBertForPreTraining,
MobileBertForQuestionAnswering,
MobileBertForSequenceClassification,
MobileBertForTokenClassification,
MobileBertLayer,
MobileBertModel,
MobileBertPreTrainedModel,
load_tf_weights_in_mobilebert,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_mobilebert import (
TF_MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFMobileBertForMaskedLM,
TFMobileBertForMultipleChoice,
TFMobileBertForNextSentencePrediction,
TFMobileBertForPreTraining,
TFMobileBertForQuestionAnswering,
TFMobileBertForSequenceClassification,
TFMobileBertForTokenClassification,
TFMobileBertMainLayer,
TFMobileBertModel,
TFMobileBertPreTrainedModel,
)
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {"configuration_mra": ["MRA_PRETRAINED_CONFIG_ARCHIVE_MAP", "MraConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_mra"] = [
'''MRA_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''MraForMaskedLM''',
'''MraForMultipleChoice''',
'''MraForQuestionAnswering''',
'''MraForSequenceClassification''',
'''MraForTokenClassification''',
'''MraLayer''',
'''MraModel''',
'''MraPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_mra import MRA_PRETRAINED_CONFIG_ARCHIVE_MAP, MraConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mra import (
MRA_PRETRAINED_MODEL_ARCHIVE_LIST,
MraForMaskedLM,
MraForMultipleChoice,
MraForQuestionAnswering,
MraForSequenceClassification,
MraForTokenClassification,
MraLayer,
MraModel,
MraPreTrainedModel,
)
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
def logical_left_shift(number: int, shift_amount: int) -> str:
    """Shift ``number`` left by ``shift_amount`` bits (zeros shifted in)."""
    if number < 0 or shift_amount < 0:
        raise ValueError("both inputs must be positive integers")
    binary_number = str(bin(number))
    binary_number += "0" * shift_amount
    return binary_number


def logical_right_shift(number: int, shift_amount: int) -> str:
    """Shift ``number`` right by ``shift_amount`` bits (bits shifted out are dropped)."""
    if number < 0 or shift_amount < 0:
        raise ValueError("both inputs must be positive integers")
    binary_number = str(bin(number))[2:]
    if shift_amount >= len(binary_number):
        return "0b0"
    shifted_binary_number = binary_number[: len(binary_number) - shift_amount]
    return "0b" + shifted_binary_number


def arithmetic_right_shift(number: int, shift_amount: int) -> str:
    """Shift ``number`` right by ``shift_amount`` bits, replicating the sign bit."""
    if number >= 0:  # Get binary representation of positive number
        binary_number = "0" + str(bin(number)).strip("-")[2:]
    else:  # Get binary (2's complement) representation of negative number
        binary_number_length = len(bin(number)[3:])  # Find 2's complement of number
        binary_number = bin(abs(number) - (1 << binary_number_length))[3:]
        binary_number = (
            "1" + "0" * (binary_number_length - len(binary_number)) + binary_number
        )
    if shift_amount >= len(binary_number):
        return "0b" + binary_number[0] * len(binary_number)
    return (
        "0b"
        + binary_number[0] * shift_amount
        + binary_number[: len(binary_number) - shift_amount]
    )
if __name__ == "__main__":
import doctest
doctest.testmod()
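# Worked examples (added; the values follow directly from the definitions above):
#   logical_left_shift(1, 1)       -> '0b10'     # 1 << 1 == 2
#   logical_right_shift(10, 1)     -> '0b101'    # 10 >> 1 == 5
#   arithmetic_right_shift(-8, 2)  -> '0b11110'  # sign bit replicated; -8 >> 2 == -2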
import os
import re
import shutil
from argparse import ArgumentParser, Namespace
from datasets.commands import BaseDatasetsCLICommand
from datasets.utils.logging import get_logger
HIGHLIGHT_MESSAGE_PRE = "<<<<<<< This should probably be modified because it mentions: "
HIGHLIGHT_MESSAGE_POST = """=======
>>>>>>>
"""
TO_HIGHLIGHT = [
'''TextEncoderConfig''',
'''ByteTextEncoder''',
'''SubwordTextEncoder''',
'''encoder_config''',
'''maybe_build_from_corpus''',
'''manual_dir''',
]
TO_CONVERT = [
# (pattern, replacement)
# Order is important here for some replacements
(R'''tfds\.core''', R'''datasets'''),
(R'''tf\.io\.gfile\.GFile''', R'''open'''),
(R'''tf\.([\w\d]+)''', R'''datasets.Value(\'\1\')'''),
(R'''tfds\.features\.Text\(\)''', R'''datasets.Value(\'string\')'''),
(R'''tfds\.features\.Text\(''', R'''datasets.Value(\'string\'),'''),
(R'''features\s*=\s*tfds.features.FeaturesDict\(''', R'''features=datasets.Features('''),
(R'''tfds\.features\.FeaturesDict\(''', R'''dict('''),
(R'''The TensorFlow Datasets Authors''', R'''The TensorFlow Datasets Authors and the HuggingFace Datasets Authors'''),
(R'''tfds\.''', R'''datasets.'''),
(R'''dl_manager\.manual_dir''', R'''self.config.data_dir'''),
(R'''self\.builder_config''', R'''self.config'''),
]
def convert_command_factory(args: Namespace):
    """Factory used by the datasets-cli to instantiate the command from parsed arguments."""
    return ConvertCommand(args.tfds_path, args.datasets_directory)


class ConvertCommand(BaseDatasetsCLICommand):
    @staticmethod
    def register_subcommand(parser: ArgumentParser):
        train_parser = parser.add_parser(
            "convert",
            help="Convert a TensorFlow Datasets dataset to a HuggingFace Datasets dataset.",
        )
        train_parser.add_argument(
            "--tfds_path",
            type=str,
            required=True,
            help="Path to a TensorFlow Datasets folder to convert or a single tfds file to convert.",
        )
        train_parser.add_argument(
            "--datasets_directory", type=str, required=True, help="Path to the HuggingFace Datasets folder."
        )
        train_parser.set_defaults(func=convert_command_factory)

    def __init__(self, tfds_path: str, datasets_directory: str, *args):
        self._logger = get_logger("datasets-cli/converting")
        self._tfds_path = tfds_path
        self._datasets_directory = datasets_directory

    def run(self):
        if os.path.isdir(self._tfds_path):
            abs_tfds_path = os.path.abspath(self._tfds_path)
        elif os.path.isfile(self._tfds_path):
            abs_tfds_path = os.path.dirname(self._tfds_path)
        else:
            raise ValueError("--tfds_path is neither a directory nor a file. Please check path.")

        abs_datasets_path = os.path.abspath(self._datasets_directory)

        self._logger.info(f"Converting datasets from {abs_tfds_path} to {abs_datasets_path}")

        utils_files = []
        with_manual_update = []
        imports_to_builder_map = {}

        if os.path.isdir(self._tfds_path):
            file_names = os.listdir(abs_tfds_path)
        else:
            file_names = [os.path.basename(self._tfds_path)]

        for f_name in file_names:
            self._logger.info(f"Looking at file {f_name}")
            input_file = os.path.join(abs_tfds_path, f_name)
            output_file = os.path.join(abs_datasets_path, f_name)

            if not os.path.isfile(input_file) or "__init__" in f_name or "_test" in f_name or ".py" not in f_name:
                self._logger.info("Skipping file")
                continue

            with open(input_file, encoding="utf-8") as f:
                lines = f.readlines()

            out_lines = []
            is_builder = False
            needs_manual_update = False
            tfds_imports = []
            for line in lines:
                out_line = line

                # Convert imports
                if "import tensorflow.compat.v2 as tf" in out_line:
                    continue
                elif "@tfds.core" in out_line:
                    continue
                elif "builder=self" in out_line:
                    continue
                elif "import tensorflow_datasets.public_api as tfds" in out_line:
                    out_line = "import datasets\n"
                elif "import tensorflow" in out_line:
                    # order is important here
                    out_line = ""
                    continue
                elif "from absl import logging" in out_line:
                    out_line = "from datasets import logging\n"
                elif "getLogger" in out_line:
                    out_line = out_line.replace("getLogger", "get_logger")
                elif any(expression in out_line for expression in TO_HIGHLIGHT):
                    needs_manual_update = True
                    to_remove = list(filter(lambda e: e in out_line, TO_HIGHLIGHT))
                    out_lines.append(HIGHLIGHT_MESSAGE_PRE + str(to_remove) + "\n")
                    out_lines.append(out_line)
                    out_lines.append(HIGHLIGHT_MESSAGE_POST)
                    continue
                else:
                    for pattern, replacement in TO_CONVERT:
                        out_line = re.sub(pattern, replacement, out_line)

                # Take care of saving utilities (to later move them together with main script)
                if "tensorflow_datasets" in out_line:
                    match = re.match(r"from\stensorflow_datasets.*import\s([^\.\r\n]+)", out_line)
                    tfds_imports.extend(imp.strip() for imp in match.group(1).split(","))
                    out_line = "from . import " + match.group(1)

                # Check we have not forget anything
                if "tf." in out_line or "tfds." in out_line or "tensorflow_datasets" in out_line:
                    raise ValueError(f"Error converting {out_line.strip()}")

                if "GeneratorBasedBuilder" in out_line or "BeamBasedBuilder" in out_line:
                    is_builder = True
                out_lines.append(out_line)

            if is_builder or "wmt" in f_name:
                # We create a new directory for each dataset
                dir_name = f_name.replace(".py", "")
                output_dir = os.path.join(abs_datasets_path, dir_name)
                output_file = os.path.join(output_dir, f_name)
                os.makedirs(output_dir, exist_ok=True)
                self._logger.info(f"Adding directory {output_dir}")
                imports_to_builder_map.update({imp: output_dir for imp in tfds_imports})
            else:
                # Utilities will be moved at the end
                utils_files.append(output_file)

            if needs_manual_update:
                with_manual_update.append(output_file)

            with open(output_file, "w", encoding="utf-8") as f:
                f.writelines(out_lines)
            self._logger.info(f"Converted in {output_file}")

        for utils_file in utils_files:
            try:
                f_name = os.path.basename(utils_file)
                dest_folder = imports_to_builder_map[f_name.replace(".py", "")]
                self._logger.info(f"Moving {dest_folder} to {utils_file}")
                shutil.copy(utils_file, dest_folder)
            except KeyError:
                self._logger.error(f"Cannot find destination folder for {utils_file}. Please copy manually.")

        if with_manual_update:
            for file_path in with_manual_update:
                self._logger.warning(
                    f"You need to manually update file {file_path} to remove configurations using 'TextEncoderConfig'."
                )
from collections import Counter
import numpy as np
from sklearn import datasets
from sklearn.model_selection import train_test_split
data = datasets.load_iris()

X = np.array(data["data"])
y = np.array(data["target"])
classes = data["target_names"]
X_train, X_test, y_train, y_test = train_test_split(X, y)


def euclidean_distance(a, b) -> float:
    """Euclidean distance between two points."""
    return np.linalg.norm(np.array(a) - np.array(b))


def classifier(train_data, train_target, classes, point, k=5):
    """Classify ``point`` by majority vote among its ``k`` nearest training points."""
    data = zip(train_data, train_target)
    # List of distances of all points from the point to be classified
    distances = []
    for data_point in data:
        distance = euclidean_distance(data_point[0], point)
        distances.append((distance, data_point[1]))
    # Choosing 'k' points with the least distances.
    votes = [i[1] for i in sorted(distances)[:k]]
    # Most commonly occurring class among them
    # is the class into which the point is classified
    result = Counter(votes).most_common(1)[0][0]
    return classes[result]
if __name__ == "__main__":
print(classifier(X_train, y_train, classes, [4.4, 3.1, 1.3, 1.4]))
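# An equivalent vectorized formulation (an aside, not part of the original):
# rank all training points at once instead of looping over them.
def classifier_vectorized(train_data, train_target, classes, point, k=5):
    dists = np.linalg.norm(np.asarray(train_data) - np.asarray(point), axis=1)
    nearest = np.asarray(train_target)[np.argsort(dists)[:k]]
    return classes[Counter(nearest).most_common(1)[0][0]]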
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
ALBERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'''albert-base-v1''': '''https://huggingface.co/albert-base-v1/resolve/main/config.json''',
'''albert-large-v1''': '''https://huggingface.co/albert-large-v1/resolve/main/config.json''',
'''albert-xlarge-v1''': '''https://huggingface.co/albert-xlarge-v1/resolve/main/config.json''',
'''albert-xxlarge-v1''': '''https://huggingface.co/albert-xxlarge-v1/resolve/main/config.json''',
'''albert-base-v2''': '''https://huggingface.co/albert-base-v2/resolve/main/config.json''',
'''albert-large-v2''': '''https://huggingface.co/albert-large-v2/resolve/main/config.json''',
'''albert-xlarge-v2''': '''https://huggingface.co/albert-xlarge-v2/resolve/main/config.json''',
'''albert-xxlarge-v2''': '''https://huggingface.co/albert-xxlarge-v2/resolve/main/config.json''',
}
class AlbertConfig(PretrainedConfig):
    model_type = "albert"

    def __init__(
        self,
        vocab_size=30000,
        embedding_size=128,
        hidden_size=4096,
        num_hidden_layers=12,
        num_hidden_groups=1,
        num_attention_heads=64,
        intermediate_size=16384,
        inner_group_num=1,
        hidden_act="gelu_new",
        hidden_dropout_prob=0,
        attention_probs_dropout_prob=0,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        classifier_dropout_prob=0.1,
        position_embedding_type="absolute",
        pad_token_id=0,
        bos_token_id=2,
        eos_token_id=3,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.embedding_size = embedding_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_hidden_groups = num_hidden_groups
        self.num_attention_heads = num_attention_heads
        self.inner_group_num = inner_group_num
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.classifier_dropout_prob = classifier_dropout_prob
        self.position_embedding_type = position_embedding_type


class AlbertOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
                ("token_type_ids", dynamic_axis),
            ]
        )
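# Usage sketch (assumed, mirroring how other OnnxConfig subclasses are driven):
#
#   config = AlbertConfig()  # the large defaults above
#   onnx_config = AlbertOnnxConfig(config, task="default")
#   list(onnx_config.inputs)  # ['input_ids', 'attention_mask', 'token_type_ids']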
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
'configuration_instructblip': [
'INSTRUCTBLIP_PRETRAINED_CONFIG_ARCHIVE_MAP',
'InstructBlipConfig',
'InstructBlipQFormerConfig',
'InstructBlipVisionConfig',
],
'processing_instructblip': ['InstructBlipProcessor'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_instructblip"] = [
'INSTRUCTBLIP_PRETRAINED_MODEL_ARCHIVE_LIST',
'InstructBlipQFormerModel',
'InstructBlipPreTrainedModel',
'InstructBlipForConditionalGeneration',
'InstructBlipVisionModel',
]
if TYPE_CHECKING:
from .configuration_instructblip import (
INSTRUCTBLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
InstructBlipConfig,
InstructBlipQFormerConfig,
InstructBlipVisionConfig,
)
from .processing_instructblip import InstructBlipProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_instructblip import (
INSTRUCTBLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
InstructBlipForConditionalGeneration,
InstructBlipPreTrainedModel,
InstructBlipQFormerModel,
InstructBlipVisionModel,
)
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
import argparse
import re
import requests
import torch
# git clone https://github.com/salesforce/BLIP.git
from models.blip import blip_decoder
from models.blip_itm import blip_itm
from models.blip_vqa import blip_vqa
from PIL import Image
from torchvision import transforms
from torchvision.transforms.functional import InterpolationMode
from transformers import (
BertTokenizer,
BlipConfig,
BlipForConditionalGeneration,
BlipForImageTextRetrieval,
BlipForQuestionAnswering,
)
def load_demo_image(image_size, device):
    url = "https://storage.googleapis.com/sfr-vision-language-research/BLIP/demo.jpg"
    image = Image.open(requests.get(url, stream=True).raw).convert("RGB")

    transform = transforms.Compose(
        [
            transforms.Resize((image_size, image_size), interpolation=InterpolationMode.BICUBIC),
            transforms.ToTensor(),
            transforms.Normalize((0.48145466, 0.4578275, 0.40821073), (0.26862954, 0.26130258, 0.27577711)),
        ]
    )
    image = transform(image).unsqueeze(0).to(device)
    return image


def rename_key(key):
    if "visual_encoder" in key:
        key = re.sub("visual_encoder*", "vision_model.encoder", key)
    if "blocks" in key:
        key = re.sub(r"blocks", "layers", key)
    if "attn" in key:
        key = re.sub(r"attn", "self_attn", key)
    if "norm1" in key:
        key = re.sub(r"norm1", "layer_norm1", key)
    if "norm2" in key:
        key = re.sub(r"norm2", "layer_norm2", key)
    if "encoder.norm" in key:
        key = re.sub(r"encoder.norm", "post_layernorm", key)
    if "encoder.patch_embed.proj" in key:
        key = re.sub(r"encoder.patch_embed.proj", "embeddings.patch_embedding", key)
    if "encoder.pos_embed" in key:
        key = re.sub(r"encoder.pos_embed", "embeddings.position_embedding", key)
    if "encoder.cls_token" in key:
        key = re.sub(r"encoder.cls_token", "embeddings.class_embedding", key)
    if "self_attn" in key:
        key = re.sub(r"self_attn.proj", "self_attn.projection", key)

    return key
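# Traced example of the rewrite rules above (the checkpoint key is hypothetical):
#   "visual_encoder.blocks.0.norm1.weight"
#       -> "vision_model.encoder.layers.0.layer_norm1.weight"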
@torch.no_grad()
def convert_blip_checkpoint(pytorch_dump_folder_path, config_path=None):
    if config_path is not None:
        config = BlipConfig.from_pretrained(config_path)
    else:
        config = BlipConfig(projection_dim=512, text_config={}, vision_config={})

    hf_model = BlipForConditionalGeneration(config).eval()

    model_url = "https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_base_capfilt_large.pth"

    pt_model = blip_decoder(pretrained=model_url, image_size=384, vit="base")
    pt_model = pt_model.eval()

    modified_state_dict = pt_model.state_dict()
    for key in modified_state_dict.copy():
        value = modified_state_dict.pop(key)
        renamed_key = rename_key(key)
        modified_state_dict[renamed_key] = value

    hf_model.load_state_dict(modified_state_dict)

    image_size = 384
    image = load_demo_image(image_size=image_size, device="cpu")
    tokenizer = BertTokenizer.from_pretrained("bert-base-uncased")
    input_ids = tokenizer(["a picture of"]).input_ids

    out = hf_model.generate(image, input_ids)
    assert out[0].tolist() == [30522, 1037, 3861, 1997, 1037, 2450, 3564, 2006, 1996, 3509, 2007, 2014, 3899, 102]

    out = hf_model.generate(image)
    assert out[0].tolist() == [30522, 1037, 2450, 3564, 2006, 1996, 3509, 2007, 2014, 3899, 102]

    if pytorch_dump_folder_path is not None:
        hf_model.save_pretrained(pytorch_dump_folder_path)

    # model_url = 'https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_vqa.pth'
    model_url = (
        "https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_base_vqa_capfilt_large.pth"
    )

    vqa_model = blip_vqa(pretrained=model_url, image_size=image_size, vit="base")
    vqa_model.eval()

    modified_state_dict = vqa_model.state_dict()
    for key in modified_state_dict.copy():
        value = modified_state_dict.pop(key)
        renamed_key = rename_key(key)
        modified_state_dict[renamed_key] = value

    hf_vqa_model = BlipForQuestionAnswering(config)
    hf_vqa_model.load_state_dict(modified_state_dict)

    question = ["How many dogs are in this image?"]
    question_input_ids = tokenizer(question, return_tensors="pt").input_ids

    answer = hf_vqa_model.generate(question_input_ids, image)
    print(tokenizer.decode(answer[0]))
    assert tokenizer.decode(answer[0]) == "[UNK] 1 [SEP]"

    if pytorch_dump_folder_path is not None:
        hf_vqa_model.save_pretrained(pytorch_dump_folder_path + "_vqa")

    model_url = "https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_base_retrieval_coco.pth"

    itm_model = blip_itm(pretrained=model_url, image_size=image_size, vit="base")
    itm_model.eval()

    modified_state_dict = itm_model.state_dict()
    for key in modified_state_dict.copy():
        value = modified_state_dict.pop(key)
        renamed_key = rename_key(key)
        modified_state_dict[renamed_key] = value

    hf_itm_model = BlipForImageTextRetrieval(config)

    question = ["A picture of a woman with a dog sitting in a beach"]
    question_input_ids = tokenizer(
        question,
        return_tensors="pt",
        padding="max_length",
        truncation=True,
        max_length=35,
    ).input_ids

    hf_itm_model.load_state_dict(modified_state_dict)
    hf_itm_model.eval()

    out_itm = hf_itm_model(question_input_ids, image, use_itm_head=True)
    out = hf_itm_model(question_input_ids, image, use_itm_head=False)

    assert out[0].item() == 0.2110687494277954
    assert torch.nn.functional.softmax(out_itm[0], dim=1)[:, 1].item() == 0.45698845386505127

    if pytorch_dump_folder_path is not None:
        hf_itm_model.save_pretrained(pytorch_dump_folder_path + "_itm")


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
    args = parser.parse_args()

    convert_blip_checkpoint(args.pytorch_dump_folder_path, args.config_path)
import sys
from collections import defaultdict
class Heap:
    def __init__(self):
        self.node_position = []

    def get_position(self, vertex):
        return self.node_position[vertex]

    def set_position(self, vertex, pos):
        self.node_position[vertex] = pos

    def top_to_bottom(self, heap, start, size, positions):
        if start > size // 2 - 1:
            return
        else:
            if 2 * start + 2 >= size:
                smallest_child = 2 * start + 1
            else:
                if heap[2 * start + 1] < heap[2 * start + 2]:
                    smallest_child = 2 * start + 1
                else:
                    smallest_child = 2 * start + 2
            if heap[smallest_child] < heap[start]:
                temp, tempa = heap[smallest_child], positions[smallest_child]
                heap[smallest_child], positions[smallest_child] = (
                    heap[start],
                    positions[start],
                )
                heap[start], positions[start] = temp, tempa

                temp = self.get_position(positions[smallest_child])
                self.set_position(
                    positions[smallest_child], self.get_position(positions[start])
                )
                self.set_position(positions[start], temp)

                self.top_to_bottom(heap, smallest_child, size, positions)

    def bottom_to_top(self, val, index, heap, position):
        temp = position[index]

        while index != 0:
            parent = int((index - 2) / 2) if index % 2 == 0 else int((index - 1) / 2)

            if val < heap[parent]:
                heap[index] = heap[parent]
                position[index] = position[parent]
                self.set_position(position[parent], index)
            else:
                heap[index] = val
                position[index] = temp
                self.set_position(temp, index)
                break
            index = parent
        else:
            heap[0] = val
            position[0] = temp
            self.set_position(temp, 0)

    def heapify(self, heap, positions):
        start = len(heap) // 2 - 1
        for i in range(start, -1, -1):
            self.top_to_bottom(heap, i, len(heap), positions)

    def delete_minimum(self, heap, positions):
        temp = positions[0]
        heap[0] = sys.maxsize
        self.top_to_bottom(heap, 0, len(heap), positions)
        return temp


def prisms_algorithm(adjacency_list):
    """Return the edges of a minimum spanning tree using Prim's algorithm."""
    heap = Heap()

    visited = [0] * len(adjacency_list)
    nbr_tv = [-1] * len(adjacency_list)  # Neighboring Tree Vertex of selected vertex
    # Minimum Distance of explored vertex with neighboring vertex of partial tree
    # formed in graph
    distance_tv = []  # Heap of Distance of vertices from their neighboring vertex
    positions = []

    for vertex in range(len(adjacency_list)):
        distance_tv.append(sys.maxsize)
        positions.append(vertex)
        heap.node_position.append(vertex)

    tree_edges = []
    visited[0] = 1
    distance_tv[0] = sys.maxsize
    for neighbor, distance in adjacency_list[0]:
        nbr_tv[neighbor] = 0
        distance_tv[neighbor] = distance
    heap.heapify(distance_tv, positions)

    for _ in range(1, len(adjacency_list)):
        vertex = heap.delete_minimum(distance_tv, positions)
        if visited[vertex] == 0:
            tree_edges.append((nbr_tv[vertex], vertex))
            visited[vertex] = 1
            for neighbor, distance in adjacency_list[vertex]:
                if (
                    visited[neighbor] == 0
                    and distance < distance_tv[heap.get_position(neighbor)]
                ):
                    distance_tv[heap.get_position(neighbor)] = distance
                    heap.bottom_to_top(
                        distance, heap.get_position(neighbor), distance_tv, positions
                    )
                    nbr_tv[neighbor] = vertex
    return tree_edges
if __name__ == "__main__": # pragma: no cover
# < --------- Prims Algorithm --------- >
    edges_number = int(input("Enter number of edges: ").strip())
    adjacency_list = defaultdict(list)
    for _ in range(edges_number):
        edge = [int(x) for x in input().strip().split()]
        adjacency_list[edge[0]].append([edge[1], edge[2]])
        adjacency_list[edge[1]].append([edge[0], edge[2]])
    print(prisms_algorithm(adjacency_list))
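# Example session (added for illustration): a triangle graph
#   Enter number of edges: 3
#   0 1 1
#   1 2 2
#   0 2 3
# prints [(0, 1), (1, 2)], the minimum spanning tree of total weight 3.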
from __future__ import annotations
from collections.abc import Iterable, Iterator
from dataclasses import dataclass
test_data_odd = (3, 9, -11, 0, 7, 5, 1, -1)
test_data_even = (4, 6, 2, 0, 8, 10, 3, -2)
@dataclass
class Node:
    data: int
    next_node: Node | None


class SortedLinkedList:
    def __init__(self, ints: Iterable[int]) -> None:
        self.head: Node | None = None
        for i in sorted(ints, reverse=True):
            self.head = Node(i, self.head)

    def __iter__(self) -> Iterator[int]:
        node = self.head
        while node:
            yield node.data
            node = node.next_node

    def __len__(self) -> int:
        return sum(1 for _ in self)

    def __str__(self) -> str:
        return " -> ".join([str(node) for node in self])


def merge_lists(sll_one: SortedLinkedList, sll_two: SortedLinkedList) -> SortedLinkedList:
    """Merge two sorted linked lists into a single sorted linked list."""
    return SortedLinkedList(list(sll_one) + list(sll_two))
if __name__ == "__main__":
import doctest
doctest.testmod()
    SSL = SortedLinkedList
print(merge_lists(SSL(test_data_odd), SSL(test_data_even)))
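# Expected output for the test data above (added): the two lists merge into one
# ascending list:
#   -11 -> -2 -> -1 -> 0 -> 0 -> 1 -> 2 -> 3 -> 3 -> 4 -> 5 -> 6 -> 7 -> 8 -> 9 -> 10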
import copy
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import Audio, ClassLabel, Features
from .base import TaskTemplate
@dataclass(frozen=True)
class AudioClassification(TaskTemplate):
    task: str = field(default="audio-classification", metadata={"include_in_asdict_even_if_is_default": True})
    input_schema: ClassVar[Features] = Features({"audio": Audio()})
    label_schema: ClassVar[Features] = Features({"labels": ClassLabel})
    audio_column: str = "audio"
    label_column: str = "labels"

    def align_with_features(self, features):
        if self.label_column not in features:
            raise ValueError(f"Column {self.label_column} is not present in features.")
        if not isinstance(features[self.label_column], ClassLabel):
            raise ValueError(f"Column {self.label_column} is not a ClassLabel.")
        task_template = copy.deepcopy(self)
        label_schema = self.label_schema.copy()
        label_schema["labels"] = features[self.label_column]
        task_template.__dict__["label_schema"] = label_schema
        return task_template

    @property
    def column_mapping(self) -> Dict[str, str]:
        return {
            self.audio_column: "audio",
            self.label_column: "labels",
        }
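# Minimal sketch of aligning the template with a dataset's features (the class
# names "dog"/"cat" are hypothetical):
#
#   features = Features({"audio": Audio(), "labels": ClassLabel(names=["dog", "cat"])})
#   aligned = AudioClassification(label_column="labels").align_with_features(features)
#   aligned.label_schema["labels"].names  # ["dog", "cat"]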
from __future__ import annotations
from collections.abc import Callable
def trapezoidal_area(
    fnc: Callable[[float], float],
    x_start: float,
    x_end: float,
    steps: int = 100,
) -> float:
    """Approximate the area between ``fnc`` and the x axis on [x_start, x_end]
    with the trapezoidal rule."""
    x1 = x_start
    fx1 = fnc(x_start)
    area = 0.0
    for _ in range(steps):
        # Approximates small segments of curve as linear and solve
        # for trapezoidal area
        x2 = (x_end - x_start) / steps + x1
        fx2 = fnc(x2)
        area += abs(fx2 + fx1) * (x2 - x1) / 2
        # Increment step
        x1 = x2
        fx1 = fx2
    return area
if __name__ == "__main__":
    def f(x):
        return x**3 + x**2

    print("f(x) = x^3 + x^2")
    print("The area between the curve, x = -5, x = 5 and the x axis is:")
    i = 10
    while i <= 100_000:
        print(f"with {i} steps: {trapezoidal_area(f, -5, 5, i)}")
        i *= 10
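# Cross-check (added): because of the abs() the loop approximates the unsigned
# area, so for f(x) = x^3 + x^2 on [-5, 5] it should converge to the integral
# of |x^3 + x^2|, i.e. 938/3 ~ 312.67, not to the signed integral 250/3.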
def solution() -> str:
    """Return the last ten digits of the series 1^1 + 2^2 + ... + 1000^1000."""
    total = 0
    for i in range(1, 1001):
        total += i**i
    return str(total)[-10:]
if __name__ == "__main__":
print(solution())
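# Equivalent computation with bounded integers (an aside): keep only the last
# ten digits at each step using three-argument pow.
if __name__ == "__main__":
    MOD = 10**10
    last_ten = sum(pow(i, i, MOD) for i in range(1, 1001)) % MOD
    assert solution() == str(last_ten).zfill(10)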
import unittest
from transformers import XLMConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
XLMForMultipleChoice,
XLMForQuestionAnswering,
XLMForQuestionAnsweringSimple,
XLMForSequenceClassification,
XLMForTokenClassification,
XLMModel,
XLMWithLMHeadModel,
)
from transformers.models.xlm.modeling_xlm import XLM_PRETRAINED_MODEL_ARCHIVE_LIST
class XLMModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_lengths=True,
        use_token_type_ids=True,
        use_labels=True,
        gelu_activation=True,
        sinusoidal_embeddings=False,
        causal=False,
        asm=False,
        n_langs=2,
        vocab_size=99,
        n_special=0,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=2,
        num_choices=4,
        summary_type="last",
        use_proj=True,
        scope=None,
        bos_token_id=0,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_lengths = use_input_lengths
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.gelu_activation = gelu_activation
        self.sinusoidal_embeddings = sinusoidal_embeddings
        self.causal = causal
        self.asm = asm
        self.n_langs = n_langs
        self.vocab_size = vocab_size
        self.n_special = n_special
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.summary_type = summary_type
        self.use_proj = use_proj
        self.scope = scope
        self.bos_token_id = bos_token_id
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        input_mask = random_attention_mask([self.batch_size, self.seq_length])

        input_lengths = None
        if self.use_input_lengths:
            input_lengths = (
                ids_tensor([self.batch_size], vocab_size=2) + self.seq_length - 2
            )  # small variation of seq_length

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.n_langs)

        sequence_labels = None
        token_labels = None
        is_impossible_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            is_impossible_labels = ids_tensor([self.batch_size], 2).float()
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()

        return (
            config,
            input_ids,
            token_type_ids,
            input_lengths,
            sequence_labels,
            token_labels,
            is_impossible_labels,
            choice_labels,
            input_mask,
        )

    def get_config(self):
        return XLMConfig(
            vocab_size=self.vocab_size,
            n_special=self.n_special,
            emb_dim=self.hidden_size,
            n_layers=self.num_hidden_layers,
            n_heads=self.num_attention_heads,
            dropout=self.hidden_dropout_prob,
            attention_dropout=self.attention_probs_dropout_prob,
            gelu_activation=self.gelu_activation,
            sinusoidal_embeddings=self.sinusoidal_embeddings,
            asm=self.asm,
            causal=self.causal,
            n_langs=self.n_langs,
            max_position_embeddings=self.max_position_embeddings,
            initializer_range=self.initializer_range,
            summary_type=self.summary_type,
            use_proj=self.use_proj,
            num_labels=self.num_labels,
            bos_token_id=self.bos_token_id,
        )
    def create_and_check_xlm_model(
        self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask
    ):
        model = XLMModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, lengths=input_lengths, langs=token_type_ids)
        result = model(input_ids, langs=token_type_ids)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_xlm_lm_head(
        self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask
    ):
        model = XLMWithLMHeadModel(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.loss.shape, ())
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_xlm_simple_qa(
        self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask
    ):
        model = XLMForQuestionAnsweringSimple(config)
        model.to(torch_device)
        model.eval()
        outputs = model(input_ids)
        outputs = model(input_ids, start_positions=sequence_labels, end_positions=sequence_labels)
        result = outputs
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def create_and_check_xlm_qa(
        self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask
    ):
        model = XLMForQuestionAnswering(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids)
        result_with_labels = model(
            input_ids,
            start_positions=sequence_labels,
            end_positions=sequence_labels,
            cls_index=sequence_labels,
            is_impossible=is_impossible_labels,
            p_mask=input_mask,
        )
        result_with_labels = model(
            input_ids,
            start_positions=sequence_labels,
            end_positions=sequence_labels,
            cls_index=sequence_labels,
            is_impossible=is_impossible_labels,
        )
        (total_loss,) = result_with_labels.to_tuple()
        result_with_labels = model(input_ids, start_positions=sequence_labels, end_positions=sequence_labels)
        (total_loss,) = result_with_labels.to_tuple()
        self.parent.assertEqual(result_with_labels.loss.shape, ())
        self.parent.assertEqual(result.start_top_log_probs.shape, (self.batch_size, model.config.start_n_top))
        self.parent.assertEqual(result.start_top_index.shape, (self.batch_size, model.config.start_n_top))
        self.parent.assertEqual(
            result.end_top_log_probs.shape, (self.batch_size, model.config.start_n_top * model.config.end_n_top)
        )
        self.parent.assertEqual(
            result.end_top_index.shape, (self.batch_size, model.config.start_n_top * model.config.end_n_top)
        )
        self.parent.assertEqual(result.cls_logits.shape, (self.batch_size,))

    def create_and_check_xlm_sequence_classif(
        self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask
    ):
        model = XLMForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids)
        result = model(input_ids, labels=sequence_labels)
        self.parent.assertEqual(result.loss.shape, ())
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

    def create_and_check_xlm_token_classif(
        self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask
    ):
        config.num_labels = self.num_labels
        model = XLMForTokenClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_xlm_for_multiple_choice(
        self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask
    ):
        config.num_choices = self.num_choices
        model = XLMForMultipleChoice(config=config)
        model.to(torch_device)
        model.eval()
        multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_token_type_ids = token_type_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        result = model(
            multiple_choice_inputs_ids,
            attention_mask=multiple_choice_input_mask,
            token_type_ids=multiple_choice_token_type_ids,
            labels=choice_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_lengths,
            sequence_labels,
            token_labels,
            is_impossible_labels,
            choice_labels,
            input_mask,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "lengths": input_lengths}
        return config, inputs_dict
@require_torch
class XLMModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            XLMModel,
            XLMWithLMHeadModel,
            XLMForQuestionAnswering,
            XLMForSequenceClassification,
            XLMForQuestionAnsweringSimple,
            XLMForTokenClassification,
            XLMForMultipleChoice,
        )
        if is_torch_available()
        else ()
    )
    all_generative_model_classes = (
        (XLMWithLMHeadModel,) if is_torch_available() else ()
    )  # TODO (PVP): Check other models whether language generation is also applicable
    pipeline_model_mapping = (
        {
            "feature-extraction": XLMModel,
            "fill-mask": XLMWithLMHeadModel,
            "question-answering": XLMForQuestionAnsweringSimple,
            "text-classification": XLMForSequenceClassification,
            "text-generation": XLMWithLMHeadModel,
            "token-classification": XLMForTokenClassification,
            "zero-shot": XLMForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )

    def is_pipeline_test_to_skip(
        self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name
    ):
        if (
            pipeline_test_casse_name == "QAPipelineTests"
            and tokenizer_name is not None
            and not tokenizer_name.endswith("Fast")
        ):
            # `QAPipelineTests` fails for a few models when the slower tokenizer are used.
            # (The slower tokenizers were never used for pipeline tests before the pipeline testing rework)
            # TODO: check (and possibly fix) the `QAPipelineTests` with slower tokenizer
            return True
        return False

    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)
        if return_labels:
            if model_class.__name__ == "XLMForQuestionAnswering":
                inputs_dict["start_positions"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )
                inputs_dict["end_positions"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )
        return inputs_dict

    def setUp(self):
        self.model_tester = XLMModelTester(self)
        self.config_tester = ConfigTester(self, config_class=XLMConfig, emb_dim=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_xlm_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_xlm_model(*config_and_inputs)

    def test_xlm_lm_head(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_xlm_lm_head(*config_and_inputs)

    def test_xlm_simple_qa(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_xlm_simple_qa(*config_and_inputs)

    def test_xlm_qa(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_xlm_qa(*config_and_inputs)

    def test_xlm_sequence_classif(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_xlm_sequence_classif(*config_and_inputs)

    def test_xlm_token_classif(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_xlm_token_classif(*config_and_inputs)

    def test_xlm_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_xlm_for_multiple_choice(*config_and_inputs)
    def _check_attentions_for_generate(
        self, batch_size, attentions, min_length, max_length, config, use_cache=False, num_beam_groups=1
    ):
        self.assertIsInstance(attentions, tuple)
        self.assertListEqual(
            [isinstance(iter_attentions, tuple) for iter_attentions in attentions], [True] * len(attentions)
        )
        self.assertEqual(len(attentions), (max_length - min_length) * num_beam_groups)

        for idx, iter_attentions in enumerate(attentions):
            # adds PAD dummy token
            tgt_len = min_length + idx + 1
            src_len = min_length + idx + 1

            expected_shape = (
                batch_size * num_beam_groups,
                config.num_attention_heads,
                tgt_len,
                src_len,
            )
            # check attn size
            self.assertListEqual(
                [layer_attention.shape for layer_attention in iter_attentions], [expected_shape] * len(iter_attentions)
            )

    def _check_hidden_states_for_generate(
        self, batch_size, hidden_states, min_length, max_length, config, use_cache=False, num_beam_groups=1
    ):
        self.assertIsInstance(hidden_states, tuple)
        self.assertListEqual(
            [isinstance(iter_hidden_states, tuple) for iter_hidden_states in hidden_states],
            [True] * len(hidden_states),
        )
        self.assertEqual(len(hidden_states), (max_length - min_length) * num_beam_groups)

        for idx, iter_hidden_states in enumerate(hidden_states):
            # adds PAD dummy token
            seq_len = min_length + idx + 1
            expected_shape = (batch_size * num_beam_groups, seq_len, config.hidden_size)
            # check hidden size
            self.assertListEqual(
                [layer_hidden_states.shape for layer_hidden_states in iter_hidden_states],
                [expected_shape] * len(iter_hidden_states),
            )
@slow
def _A (self ):
for model_name in XLM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__lowercase= XLMModel.from_pretrained(lowerCAmelCase )
self.assertIsNotNone(lowerCAmelCase )
@require_torch
class A ( unittest.TestCase ):
@slow
def _A (self ):
__lowercase= XLMWithLMHeadModel.from_pretrained('xlm-mlm-en-2048' )
model.to(lowerCAmelCase )
__lowercase= torch.tensor([[1_4, 4_4_7]] , dtype=torch.long , device=lowerCAmelCase ) # the president
__lowercase= [
1_4,
4_4_7,
1_4,
4_4_7,
1_4,
4_4_7,
1_4,
4_4_7,
1_4,
4_4_7,
1_4,
4_4_7,
1_4,
4_4_7,
1_4,
4_4_7,
1_4,
4_4_7,
1_4,
4_4_7,
] # the president the president the president the president the president the president the president the president the president the president
# TODO(PVP): this and other input_ids I tried for generation give pretty bad results. Not sure why. Model might just not be made for auto-regressive inference
__lowercase= model.generate(lowerCAmelCase , do_sample=lowerCAmelCase )
self.assertListEqual(output_ids[0].cpu().numpy().tolist() , lowerCAmelCase )
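# Hedged illustration (names below are ours, not the test's): the shape checks
# above expect, at generation step idx, attentions of shape
# (batch_size * num_beam_groups, num_heads, min_length + idx + 1, min_length + idx + 1).
batch_size, num_beam_groups, num_heads = 2, 1, 4
min_length, max_length = 5, 8
expected_shapes = [
    (batch_size * num_beam_groups, num_heads, min_length + idx + 1, min_length + idx + 1)
    for idx in range(max_length - min_length)
]
assert expected_shapes[0] == (2, 4, 6, 6) and len(expected_shapes) == 3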
| 304
| 0
|
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from diffusers import (
DDIMScheduler,
KandinskyVaaImgaImgPipeline,
KandinskyVaaPriorPipeline,
UNetaDConditionModel,
VQModel,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class A ( _lowerCamelCase , unittest.TestCase ):
UpperCamelCase_ : Any =KandinskyVaaImgaImgPipeline
UpperCamelCase_ : str =['''image_embeds''', '''negative_image_embeds''', '''image''']
UpperCamelCase_ : Tuple =[
'''image_embeds''',
'''negative_image_embeds''',
'''image''',
]
UpperCamelCase_ : Tuple =[
'''generator''',
'''height''',
'''width''',
'''strength''',
'''guidance_scale''',
'''num_inference_steps''',
'''return_dict''',
'''guidance_scale''',
'''num_images_per_prompt''',
'''output_type''',
'''return_dict''',
]
UpperCamelCase_ : Tuple =False
@property
def _A (self ):
return 3_2
@property
def _A (self ):
return 3_2
@property
def _A (self ):
return self.time_input_dim
@property
def _A (self ):
return self.time_input_dim * 4
@property
def _A (self ):
return 1_0_0
@property
def _A (self ):
torch.manual_seed(0 )
__lowercase= {
'in_channels': 4,
# Out channels is double in channels because predicts mean and variance
'out_channels': 8,
'addition_embed_type': 'image',
'down_block_types': ('ResnetDownsampleBlock2D', 'SimpleCrossAttnDownBlock2D'),
'up_block_types': ('SimpleCrossAttnUpBlock2D', 'ResnetUpsampleBlock2D'),
'mid_block_type': 'UNetMidBlock2DSimpleCrossAttn',
'block_out_channels': (self.block_out_channels_a, self.block_out_channels_a * 2),
'layers_per_block': 1,
'encoder_hid_dim': self.text_embedder_hidden_size,
'encoder_hid_dim_type': 'image_proj',
'cross_attention_dim': self.cross_attention_dim,
'attention_head_dim': 4,
'resnet_time_scale_shift': 'scale_shift',
'class_embed_type': None,
}
__lowercase= UNetaDConditionModel(**lowercase_ )
return model
@property
def _A (self ):
return {
"block_out_channels": [3_2, 6_4],
"down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"],
"in_channels": 3,
"latent_channels": 4,
"layers_per_block": 1,
"norm_num_groups": 8,
"norm_type": "spatial",
"num_vq_embeddings": 1_2,
"out_channels": 3,
"up_block_types": [
"AttnUpDecoderBlock2D",
"UpDecoderBlock2D",
],
"vq_embed_dim": 4,
}
@property
def _A (self ):
torch.manual_seed(0 )
__lowercase= VQModel(**self.dummy_movq_kwargs )
return model
def _A (self ):
__lowercase= self.dummy_unet
__lowercase= self.dummy_movq
__lowercase= {
'num_train_timesteps': 1_0_0_0,
'beta_schedule': 'linear',
'beta_start': 0.0_00_85,
'beta_end': 0.0_12,
'clip_sample': False,
'set_alpha_to_one': False,
'steps_offset': 0,
'prediction_type': 'epsilon',
'thresholding': False,
}
__lowercase= DDIMScheduler(**lowercase_ )
__lowercase= {
'unet': unet,
'scheduler': scheduler,
'movq': movq,
}
return components
def _A (self , lowerCAmelCase , lowerCAmelCase=0 ):
__lowercase= floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(lowercase_ ) ).to(lowercase_ )
__lowercase= floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(seed + 1 ) ).to(
lowercase_ )
# create init_image
__lowercase= floats_tensor((1, 3, 6_4, 6_4) , rng=random.Random(lowercase_ ) ).to(lowercase_ )
__lowercase= image.cpu().permute(0 , 2 , 3 , 1 )[0]
__lowercase= Image.fromarray(np.uinta(lowercase_ ) ).convert('RGB' ).resize((2_5_6, 2_5_6) )
if str(lowercase_ ).startswith('mps' ):
__lowercase= torch.manual_seed(lowercase_ )
else:
__lowercase= torch.Generator(device=lowercase_ ).manual_seed(lowercase_ )
__lowercase= {
'image': init_image,
'image_embeds': image_embeds,
'negative_image_embeds': negative_image_embeds,
'generator': generator,
'height': 6_4,
'width': 6_4,
'num_inference_steps': 1_0,
'guidance_scale': 7.0,
'strength': 0.2,
'output_type': 'np',
}
return inputs
def _A (self ):
__lowercase= 'cpu'
__lowercase= self.get_dummy_components()
__lowercase= self.pipeline_class(**lowercase_ )
__lowercase= pipe.to(lowercase_ )
pipe.set_progress_bar_config(disable=lowercase_ )
__lowercase= pipe(**self.get_dummy_inputs(lowercase_ ) )
__lowercase= output.images
__lowercase= pipe(
**self.get_dummy_inputs(lowercase_ ) , return_dict=lowercase_ , )[0]
__lowercase= image[0, -3:, -3:, -1]
__lowercase= image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 6_4, 6_4, 3)
__lowercase= np.array(
[0.6_19_97_78, 0.63_98_44_06, 0.46_14_57_85, 0.62_94_49_84, 0.5_62_22_15, 0.47_30_61_32, 0.47_44_14_56, 0.4_60_76_06, 0.48_71_92_63] )
assert (
np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
), f' expected_slice {expected_slice}, but got {image_slice.flatten()}'
assert (
np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
), f' expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}'
@slow
@require_torch_gpu
class A ( unittest.TestCase ):
def _A (self ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def _A (self ):
__lowercase= load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/kandinskyv22/kandinskyv22_img2img_frog.npy' )
__lowercase= load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main' '/kandinsky/cat.png' )
__lowercase= 'A red cartoon frog, 4k'
__lowercase= KandinskyVaaPriorPipeline.from_pretrained(
'kandinsky-community/kandinsky-2-2-prior' , torch_dtype=torch.floataa )
pipe_prior.to(lowercase_ )
__lowercase= KandinskyVaaImgaImgPipeline.from_pretrained(
'kandinsky-community/kandinsky-2-2-decoder' , torch_dtype=torch.floataa )
__lowercase= pipeline.to(lowercase_ )
pipeline.set_progress_bar_config(disable=lowercase_ )
__lowercase= torch.Generator(device='cpu' ).manual_seed(0 )
__lowercase, __lowercase= pipe_prior(
lowercase_ , generator=lowercase_ , num_inference_steps=5 , negative_prompt='' , ).to_tuple()
__lowercase= pipeline(
image=lowercase_ , image_embeds=lowercase_ , negative_image_embeds=lowercase_ , generator=lowercase_ , num_inference_steps=1_0_0 , height=7_6_8 , width=7_6_8 , strength=0.2 , output_type='np' , )
__lowercase= output.images[0]
assert image.shape == (7_6_8, 7_6_8, 3)
assert_mean_pixel_difference(lowercase_ , lowercase_ )
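# Hedged sketch (helper name is ours) of the device-dependent seeding used in
# get_dummy_inputs above: MPS cannot host torch.Generator objects, so the
# default CPU generator is seeded instead.
def _make_generator(device, seed):
    if str(device).startswith('mps'):
        return torch.manual_seed(seed)
    return torch.Generator(device=device).manual_seed(seed)
assert _make_generator('cpu', 0).initial_seed() == 0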
| 356
|
from __future__ import annotations
import json
import requests
from bs4 import BeautifulSoup
from fake_useragent import UserAgent
headers = {"UserAgent": UserAgent().random}
def extract_user_profile(script) -> dict:
    '''simple docstring'''
    data = script.contents[0]
    info = json.loads(data[data.find('{"config"') : -1])
    return info["entry_data"]["ProfilePage"][0]["graphql"]["user"]
class InstagramUser:
    def __init__(self, username):
        self.url = f'https://www.instagram.com/{username}/'
        self.user_data = self.get_json()
    def get_json(self) -> dict:
        html = requests.get(self.url, headers=headers).text
        scripts = BeautifulSoup(html, 'html.parser').find_all('script')
        try:
            return extract_user_profile(scripts[4])
        except (json.decoder.JSONDecodeError, KeyError):
            return extract_user_profile(scripts[3])
def __repr__(self ):
return f'{self.__class__.__name__}(\'{self.username}\')'
def __str__(self ):
return f'{self.fullname} ({self.username}) is {self.biography}'
    @property
    def username(self) -> str:
        return self.user_data["username"]
    @property
    def fullname(self) -> str:
        return self.user_data["full_name"]
    @property
    def biography(self) -> str:
        return self.user_data["biography"]
    @property
    def email(self) -> str:
        return self.user_data["business_email"]
    @property
    def website(self) -> str:
        return self.user_data["external_url"]
    @property
    def number_of_followers(self) -> int:
        return self.user_data["edge_followed_by"]["count"]
    @property
    def number_of_followings(self) -> int:
        return self.user_data["edge_follow"]["count"]
    @property
    def number_of_posts(self) -> int:
        return self.user_data["edge_owner_to_timeline_media"]["count"]
    @property
    def profile_picture_url(self) -> str:
        return self.user_data["profile_pic_url_hd"]
    @property
    def is_verified(self) -> bool:
        return self.user_data["is_verified"]
    @property
    def is_private(self) -> bool:
        return self.user_data["is_private"]
def test_instagram_user(username: str = "github") -> None:
    '''simple docstring'''
    import os
    if os.environ.get('CI'):
        return  # test failing on GitHub Actions
    instagram_user = InstagramUser(username)
    assert instagram_user.user_data
    assert isinstance(instagram_user.user_data, dict)
assert instagram_user.username == username
if username != "github":
return
assert instagram_user.fullname == "GitHub"
assert instagram_user.biography == "Built for developers."
assert instagram_user.number_of_posts > 1_5_0
assert instagram_user.number_of_followers > 1_2_0_0_0_0
assert instagram_user.number_of_followings > 1_5
assert instagram_user.email == "support@github.com"
assert instagram_user.website == "https://github.com/readme"
assert instagram_user.profile_picture_url.startswith('https://instagram.' )
assert instagram_user.is_verified is True
assert instagram_user.is_private is False
if __name__ == "__main__":
import doctest
doctest.testmod()
    instagram_user = InstagramUser("github")
print(instagram_user)
print(F'{instagram_user.number_of_posts = }')
print(F'{instagram_user.number_of_followers = }')
print(F'{instagram_user.number_of_followings = }')
print(F'{instagram_user.email = }')
print(F'{instagram_user.website = }')
print(F'{instagram_user.profile_picture_url = }')
print(F'{instagram_user.is_verified = }')
print(F'{instagram_user.is_private = }')
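    # Hedged offline sketch of the script-tag parsing above, on a canned HTML
    # snippet (the markup and keys are illustrative, not Instagram's live page).
    html_snippet = '<script>window._sharedData = {"config": {"page": 1}};</script>'
    demo_script = BeautifulSoup(html_snippet, 'html.parser').find('script')
    demo_data = demo_script.contents[0]
    assert json.loads(demo_data[demo_data.find('{"config"') : -1]) == {'config': {'page': 1}}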
| 304
| 0
|
import os # noqa: this is just for tests
import os as renamed_os # noqa: this is just for tests
from os import path # noqa: this is just for tests
from os import path as renamed_path # noqa: this is just for tests
from os.path import join # noqa: this is just for tests
from os.path import join as renamed_join # noqa: this is just for tests
lowerCAmelCase = open # noqa: we just need to have a builtin inside this module to test it properly
| 357
|
from typing import Any
import numpy as np
def is_hermitian(matrix: np.ndarray) -> bool:
    '''simple docstring'''
    return np.array_equal(matrix, matrix.conjugate().T)
def rayleigh_quotient(a: np.ndarray, v: np.ndarray) -> Any:
    '''simple docstring'''
    v_star = v.conjugate().T
    v_star_dot = v_star.dot(a)
    assert isinstance(v_star_dot, np.ndarray)
    return (v_star_dot.dot(v)) / (v_star.dot(v))
def tests() -> None:
    '''simple docstring'''
    a = np.array([[2, 2 + 1j, 4], [2 - 1j, 3, 1j], [4, -1j, 1]])
    v = np.array([[1], [2], [3]])
    assert is_hermitian(a), f'{a} is not hermitian.'
    print(rayleigh_quotient(a, v))
    a = np.array([[1, 2, 4], [2, 3, -1], [4, -1, 1]])
    assert is_hermitian(a), f'{a} is not hermitian.'
    assert rayleigh_quotient(a, v) == float(3)
if __name__ == "__main__":
import doctest
doctest.testmod()
tests()
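    # Hedged extra check (assumes numpy.linalg.eigh): for a Hermitian matrix,
    # the Rayleigh quotient of an eigenvector recovers its eigenvalue.
    m = np.array([[2.0, 0.0], [0.0, 5.0]])
    eigenvalues, eigenvectors = np.linalg.eigh(m)
    v0 = eigenvectors[:, [0]]
    assert np.isclose(rayleigh_quotient(m, v0), eigenvalues[0])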
| 304
| 0
|
from math import factorial
DIGIT_FACTORIAL = {str(d): factorial(d) for d in range(1_0)}
def sum_of_digit_factorial(n: int) -> int:
    '''simple docstring'''
    return sum(DIGIT_FACTORIAL[d] for d in str(n))
def solution() -> int:
    '''simple docstring'''
    limit = 7 * factorial(9) + 1
    return sum(i for i in range(3, limit) if sum_of_digit_factorial(i) == i)
if __name__ == "__main__":
print(F'{solution() = }')
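    # Hedged spot check: 145 is a known fixed point, since 1! + 4! + 5! = 145.
    assert sum_of_digit_factorial(1_4_5) == 1_4_5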
| 358
|
from math import ceil
from typing import List, Optional, Union
import numpy as np
from ...audio_utils import mel_filter_bank, spectrogram, window_function
from ...feature_extraction_sequence_utils import BatchFeature, SequenceFeatureExtractor
from ...utils import TensorType, logging
lowerCAmelCase = logging.get_logger(__name__)
class A ( A_ ):
UpperCamelCase_ : Dict =['''audio_values''', '''audio_mask''']
def __init__(self , lowerCAmelCase=2_0_4_8 , lowerCAmelCase=1 , lowerCAmelCase=[1_6, 1_6] , lowerCAmelCase=1_2_8 , lowerCAmelCase=4_4_1_0_0 , lowerCAmelCase=8_6 , lowerCAmelCase=2_0_4_8 , lowerCAmelCase=0.0 , **lowerCAmelCase , ):
super().__init__(
feature_size=lowerCAmelCase , sampling_rate=lowerCAmelCase , padding_value=lowerCAmelCase , **lowerCAmelCase , )
__lowercase= spectrogram_length
__lowercase= num_channels
__lowercase= patch_size
__lowercase= feature_size // self.patch_size[1]
__lowercase= n_fft
__lowercase= sampling_rate // hop_length_to_sampling_rate
__lowercase= sampling_rate
__lowercase= padding_value
__lowercase= mel_filter_bank(
num_frequency_bins=1 + n_fft // 2 , num_mel_filters=lowerCAmelCase , min_frequency=0.0 , max_frequency=2_20_50.0 , sampling_rate=lowerCAmelCase , norm='slaney' , mel_scale='slaney' , ).T
def _A (self , lowerCAmelCase ):
__lowercase= spectrogram(
lowerCAmelCase , window_function(self.n_fft , 'hann' ) , frame_length=self.n_fft , hop_length=self.hop_length , power=2.0 , mel_filters=self.mel_filters.T , log_mel='dB' , db_range=80.0 , )
__lowercase= log_spec[:, :-1]
__lowercase= log_spec - 20.0
__lowercase= np.clip(log_spec / 40.0 , -2.0 , 0.0 ) + 1.0
return log_spec
def __call__(self , lowerCAmelCase , lowerCAmelCase = None , lowerCAmelCase = True , lowerCAmelCase = None , lowerCAmelCase = False , lowerCAmelCase = False , **lowerCAmelCase , ):
if sampling_rate is not None:
if sampling_rate != self.sampling_rate:
raise ValueError(
'This feature extractor is set to support sampling rate'
f' of {self.sampling_rate}. Please make sure that the provided `raw_speech` input was sampled'
f' with {self.sampling_rate} and not {sampling_rate}.' )
else:
logger.warning(
'It is strongly recommended to pass the `sampling_rate` argument to this function. '
'Failing to do so can result in silent errors that might be hard to debug.' )
__lowercase= isinstance(lowerCAmelCase , np.ndarray ) and len(raw_speech.shape ) > 1
if is_batched_numpy and len(raw_speech.shape ) > 2:
raise ValueError(f'Only mono-channel audio is supported for input to {self}' )
__lowercase= is_batched_numpy or (
isinstance(lowerCAmelCase , (list, tuple) ) and (isinstance(raw_speech[0] , (np.ndarray, tuple, list) ))
)
if is_batched:
__lowercase= [np.asarray([speech] , dtype=np.floataa ).T for speech in raw_speech]
elif not is_batched and not isinstance(lowerCAmelCase , np.ndarray ):
__lowercase= np.asarray(lowerCAmelCase , dtype=np.floataa )
elif isinstance(lowerCAmelCase , np.ndarray ) and raw_speech.dtype is np.dtype(np.floataa ):
__lowercase= raw_speech.astype(np.floataa )
# always return batch
if not is_batched:
__lowercase= [np.asarray([raw_speech] ).T]
# Convert audio signals to log mel spectrograms, truncate by time axis
__lowercase= [
self._np_extract_fbank_features(waveform.squeeze() ).T[: self.spectrogram_length] for waveform in raw_speech
]
if isinstance(audio_features[0] , lowerCAmelCase ):
__lowercase= [np.asarray(lowerCAmelCase , dtype=np.floataa ) for feature in audio_features]
# Create audio attention mask
__lowercase= max(
[ceil(feature.shape[0] / self.patch_size[0] ) * self.freq_len for feature in audio_features] ) # The maximum number of audio patches in a batch
if return_attention_mask:
__lowercase= [
(ceil(feature.shape[0] / self.patch_size[0] ) * self.freq_len) * [1]
+ (max_patch_len - ceil(feature.shape[0] / self.patch_size[0] ) * self.freq_len) * [0]
for feature in audio_features
]
__lowercase= np.array(lowerCAmelCase ).astype(np.floataa )
# convert into correct format for padding
__lowercase= max_patch_len // self.freq_len * self.patch_size[0] # The maximum audio size in a batch
__lowercase= np.ones([len(lowerCAmelCase ), 1, max_time_len, self.feature_size] ).astype(np.floataa )
__lowercase= padded_audio_features * self.padding_value
for i in range(len(lowerCAmelCase ) ):
__lowercase= audio_features[i]
__lowercase= feature
# return as BatchFeature
if return_attention_mask:
__lowercase= {'audio_values': padded_audio_features, 'audio_mask': audio_mask}
else:
__lowercase= {'audio_values': padded_audio_features}
__lowercase= BatchFeature(data=lowerCAmelCase , tensor_type=lowerCAmelCase )
return encoded_inputs
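# Hedged sketch (helper and sizes are illustrative) of the patch-level
# attention mask built above: 1s for real audio patches, 0s for padding.
from math import ceil as _ceil
def _audio_patch_mask(num_frames, patch_time, freq_len, max_patches):
    num_real = _ceil(num_frames / patch_time) * freq_len
    return [1] * num_real + [0] * (max_patches - num_real)
assert sum(_audio_patch_mask(1_0, 1_6, 8, 1_6)) == 8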
| 304
| 0
|
from __future__ import annotations
arr = [-1_0, -5, 0, 5, 5.1, 1_1, 1_3, 2_1, 3, 4, -2_1, -1_0, -5, -1, 0]
expect = [-5, 0, 5, 5.1, 1_1, 1_3, 2_1, -1, 4, -1, -1_0, -5, -1, 0, -1]
def next_greatest_element_slow(arr: list[float]) -> list[float]:
    '''simple docstring'''
    result = []
    arr_size = len(arr)
    for i in range(arr_size):
        next_element: float = -1
        for j in range(i + 1, arr_size):
            if arr[i] < arr[j]:
                next_element = arr[j]
                break
        result.append(next_element)
    return result
def next_greatest_element_fast(arr: list[float]) -> list[float]:
    '''simple docstring'''
    result = []
    for i, outer in enumerate(arr):
        next_item: float = -1
        for inner in arr[i + 1 :]:
            if outer < inner:
                next_item = inner
                break
        result.append(next_item)
    return result
def next_greatest_element(arr: list[float]) -> list[float]:
    '''simple docstring'''
    arr_size = len(arr)
    stack: list[float] = []
    result: list[float] = [-1] * arr_size
    for index in reversed(range(arr_size)):
        if stack:
            while stack[-1] <= arr[index]:
                stack.pop()
                if not stack:
                    break
        if stack:
            result[index] = stack[-1]
        stack.append(arr[index])
    return result
if __name__ == "__main__":
from doctest import testmod
from timeit import timeit
testmod()
print(next_greatest_element_slow(arr))
print(next_greatest_element_fast(arr))
print(next_greatest_element(arr))
    setup = (
'''from __main__ import arr, next_greatest_element_slow, '''
'''next_greatest_element_fast, next_greatest_element'''
)
print(
'''next_greatest_element_slow():''',
timeit('''next_greatest_element_slow(arr)''', setup=setup),
)
print(
'''next_greatest_element_fast():''',
timeit('''next_greatest_element_fast(arr)''', setup=setup),
)
print(
''' next_greatest_element():''',
timeit('''next_greatest_element(arr)''', setup=setup),
)
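    # Hedged spot check of the stack-based variant on a tiny input:
    # in [2, 1, 3] the next greater of 2 and of 1 is 3; 3 has none.
    assert next_greatest_element([2, 1, 3]) == [3, 3, -1]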
| 359
|
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from accelerate import PartialState
from accelerate.utils.operations import broadcast, gather, gather_object, pad_across_processes, reduce
def create_tensor(state):
    '''simple docstring'''
    return (torch.arange(state.num_processes) + 1.0 + (state.num_processes * state.process_index)).to(state.device)
def test_gather(state):
    '''simple docstring'''
    tensor = create_tensor(state)
    gathered_tensor = gather(tensor)
    assert gathered_tensor.tolist() == list(range(1, state.num_processes**2 + 1))
def test_gather_object(state):
    '''simple docstring'''
    obj = [state.process_index]
    gathered_obj = gather_object(obj)
    assert len(gathered_obj) == state.num_processes, f'{gathered_obj}, {len(gathered_obj)} != {state.num_processes}'
    assert gathered_obj == list(range(state.num_processes)), f'{gathered_obj} != {list(range(state.num_processes))}'
def test_broadcast(state):
    '''simple docstring'''
    tensor = create_tensor(state)
    broadcasted_tensor = broadcast(tensor)
    assert broadcasted_tensor.shape == torch.Size([state.num_processes])
    assert broadcasted_tensor.tolist() == list(range(1, state.num_processes + 1))
def test_pad_across_processes(state):
    '''simple docstring'''
    if state.is_main_process:
        tensor = torch.arange(state.num_processes + 1).to(state.device)
    else:
        tensor = torch.arange(state.num_processes).to(state.device)
    padded_tensor = pad_across_processes(tensor)
    assert padded_tensor.shape == torch.Size([state.num_processes + 1])
    if not state.is_main_process:
        assert padded_tensor.tolist() == list(range(0, state.num_processes)) + [0]
def test_reduce_sum(state):
    '''simple docstring'''
    if state.num_processes != 2:
        return
    tensor = create_tensor(state)
    reduced_tensor = reduce(tensor, 'sum')
    truth_tensor = torch.tensor([4.0, 6]).to(state.device)
    assert torch.allclose(reduced_tensor, truth_tensor), f'{reduced_tensor} != {truth_tensor}'
def test_reduce_mean(state):
    '''simple docstring'''
    if state.num_processes != 2:
        return
    tensor = create_tensor(state)
    reduced_tensor = reduce(tensor, 'mean')
    truth_tensor = torch.tensor([2.0, 3]).to(state.device)
    assert torch.allclose(reduced_tensor, truth_tensor), f'{reduced_tensor} != {truth_tensor}'
def _mp_fn(index):
    '''simple docstring'''
    main()
def main():
    '''simple docstring'''
    state = PartialState()
    state.print(f'State: {state}')
    state.print('testing gather')
    test_gather(state)
    state.print('testing gather_object')
    test_gather_object(state)
    state.print('testing broadcast')
    test_broadcast(state)
    state.print('testing pad_across_processes')
    test_pad_across_processes(state)
    state.print('testing reduce_sum')
    test_reduce_sum(state)
    state.print('testing reduce_mean')
    test_reduce_mean(state)
if __name__ == "__main__":
main()
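    # Hedged single-process illustration of the gather expectation above:
    # rank r contributes [r*n + 1, ..., r*n + n]; gathered -> 1 .. n**2.
    n = 1
    gathered = [r * n + 1 + i for r in range(n) for i in range(n)]
    assert gathered == list(range(1, n**2 + 1))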
| 304
| 0
|
from transformers import BertTokenizer, EncoderDecoderModel, SeqaSeqTrainer, SeqaSeqTrainingArguments
from transformers.testing_utils import TestCasePlus, require_torch, slow
from transformers.utils import is_datasets_available
if is_datasets_available():
import datasets
class A ( UpperCAmelCase__ ):
@slow
@require_torch
def _A (self ):
__lowercase= EncoderDecoderModel.from_encoder_decoder_pretrained('prajjwal1/bert-tiny' , 'prajjwal1/bert-tiny' )
__lowercase= BertTokenizer.from_pretrained('bert-base-uncased' )
__lowercase= bertabert.config.encoder.vocab_size
__lowercase= tokenizer.sep_token_id
__lowercase= tokenizer.cls_token_id
__lowercase= 1_2_8
__lowercase= datasets.load_dataset('cnn_dailymail' , '3.0.0' , split='train[:1%]' )
__lowercase= datasets.load_dataset('cnn_dailymail' , '3.0.0' , split='validation[:1%]' )
__lowercase= train_dataset.select(range(3_2 ) )
__lowercase= val_dataset.select(range(1_6 ) )
__lowercase= 4
def _map_to_encoder_decoder_inputs(lowerCAmelCase ):
# Tokenizer will automatically set [BOS] <text> [EOS]
__lowercase= tokenizer(batch['article'] , padding='max_length' , truncation=_SCREAMING_SNAKE_CASE , max_length=5_1_2 )
__lowercase= tokenizer(batch['highlights'] , padding='max_length' , truncation=_SCREAMING_SNAKE_CASE , max_length=1_2_8 )
__lowercase= inputs.input_ids
__lowercase= inputs.attention_mask
__lowercase= outputs.input_ids
__lowercase= outputs.input_ids.copy()
__lowercase= [
                [-1_0_0 if token == tokenizer.pad_token_id else token for token in labels] for labels in batch['labels']
]
__lowercase= outputs.attention_mask
assert all(len(_SCREAMING_SNAKE_CASE ) == 5_1_2 for x in inputs.input_ids )
assert all(len(_SCREAMING_SNAKE_CASE ) == 1_2_8 for x in outputs.input_ids )
return batch
def _compute_metrics(lowerCAmelCase ):
__lowercase= pred.label_ids
__lowercase= pred.predictions
# all unnecessary tokens are removed
__lowercase= tokenizer.batch_decode(_SCREAMING_SNAKE_CASE , skip_special_tokens=_SCREAMING_SNAKE_CASE )
__lowercase= tokenizer.batch_decode(_SCREAMING_SNAKE_CASE , skip_special_tokens=_SCREAMING_SNAKE_CASE )
__lowercase= sum([int(pred_str[i] == label_str[i] ) for i in range(len(_SCREAMING_SNAKE_CASE ) )] ) / len(_SCREAMING_SNAKE_CASE )
return {"accuracy": accuracy}
# map train dataset
__lowercase= train_dataset.map(
_map_to_encoder_decoder_inputs , batched=_SCREAMING_SNAKE_CASE , batch_size=_SCREAMING_SNAKE_CASE , remove_columns=['article', 'highlights'] , )
train_dataset.set_format(
type='torch' , columns=['input_ids', 'attention_mask', 'decoder_input_ids', 'decoder_attention_mask', 'labels'] , )
# same for validation dataset
__lowercase= val_dataset.map(
_map_to_encoder_decoder_inputs , batched=_SCREAMING_SNAKE_CASE , batch_size=_SCREAMING_SNAKE_CASE , remove_columns=['article', 'highlights'] , )
val_dataset.set_format(
type='torch' , columns=['input_ids', 'attention_mask', 'decoder_input_ids', 'decoder_attention_mask', 'labels'] , )
__lowercase= self.get_auto_remove_tmp_dir()
__lowercase= SeqaSeqTrainingArguments(
output_dir=_SCREAMING_SNAKE_CASE , per_device_train_batch_size=_SCREAMING_SNAKE_CASE , per_device_eval_batch_size=_SCREAMING_SNAKE_CASE , predict_with_generate=_SCREAMING_SNAKE_CASE , evaluation_strategy='steps' , do_train=_SCREAMING_SNAKE_CASE , do_eval=_SCREAMING_SNAKE_CASE , warmup_steps=0 , eval_steps=2 , logging_steps=2 , )
# instantiate trainer
__lowercase= SeqaSeqTrainer(
model=_SCREAMING_SNAKE_CASE , args=_SCREAMING_SNAKE_CASE , compute_metrics=_compute_metrics , train_dataset=_SCREAMING_SNAKE_CASE , eval_dataset=_SCREAMING_SNAKE_CASE , tokenizer=_SCREAMING_SNAKE_CASE , )
# start training
trainer.train()
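# Hedged sketch of the label masking in _map_to_encoder_decoder_inputs above:
# pad token ids become -100 so the cross-entropy loss ignores them
# (token ids below are illustrative).
_pad_token_id = 0
_label_rows = [[5, 7, 0, 0], [9, 0, 0, 0]]
_masked = [[-1_0_0 if token == _pad_token_id else token for token in row] for row in _label_rows]
assert _masked == [[5, 7, -1_0_0, -1_0_0], [9, -1_0_0, -1_0_0, -1_0_0]]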
| 360
|
# DISCLAIMER: This file is strongly influenced by https://github.com/yang-song/score_sde_pytorch
import math
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, randn_tensor
from .scheduling_utils import SchedulerMixin, SchedulerOutput
@dataclass
class A ( A_ ):
UpperCamelCase_ : torch.FloatTensor
UpperCamelCase_ : torch.FloatTensor
class A ( A_ , A_ ):
UpperCamelCase_ : Dict =1
@register_to_config
def __init__(self , lowerCAmelCase = 2_0_0_0 , lowerCAmelCase = 0.15 , lowerCAmelCase = 0.01 , lowerCAmelCase = 13_48.0 , lowerCAmelCase = 1E-5 , lowerCAmelCase = 1 , ):
# standard deviation of the initial noise distribution
__lowercase= sigma_max
# setable values
__lowercase= None
self.set_sigmas(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase )
def _A (self , lowerCAmelCase , lowerCAmelCase = None ):
return sample
def _A (self , lowerCAmelCase , lowerCAmelCase = None , lowerCAmelCase = None ):
__lowercase= sampling_eps if sampling_eps is not None else self.config.sampling_eps
__lowercase= torch.linspace(1 , lowerCAmelCase , lowerCAmelCase , device=lowerCAmelCase )
def _A (self , lowerCAmelCase , lowerCAmelCase = None , lowerCAmelCase = None , lowerCAmelCase = None ):
__lowercase= sigma_min if sigma_min is not None else self.config.sigma_min
__lowercase= sigma_max if sigma_max is not None else self.config.sigma_max
__lowercase= sampling_eps if sampling_eps is not None else self.config.sampling_eps
if self.timesteps is None:
self.set_timesteps(lowerCAmelCase , lowerCAmelCase )
__lowercase= sigma_min * (sigma_max / sigma_min) ** (self.timesteps / sampling_eps)
__lowercase= torch.exp(torch.linspace(math.log(lowerCAmelCase ) , math.log(lowerCAmelCase ) , lowerCAmelCase ) )
__lowercase= torch.tensor([sigma_min * (sigma_max / sigma_min) ** t for t in self.timesteps] )
def _A (self , lowerCAmelCase , lowerCAmelCase ):
return torch.where(
timesteps == 0 , torch.zeros_like(t.to(timesteps.device ) ) , self.discrete_sigmas[timesteps - 1].to(timesteps.device ) , )
def _A (self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase = None , lowerCAmelCase = True , ):
if self.timesteps is None:
raise ValueError(
'`self.timesteps` is not set, you need to run \'set_timesteps\' after creating the scheduler' )
__lowercase= timestep * torch.ones(
sample.shape[0] , device=sample.device ) # torch.repeat_interleave(timestep, sample.shape[0])
__lowercase= (timestep * (len(self.timesteps ) - 1)).long()
# mps requires indices to be in the same device, so we use cpu as is the default with cuda
__lowercase= timesteps.to(self.discrete_sigmas.device )
__lowercase= self.discrete_sigmas[timesteps].to(sample.device )
__lowercase= self.get_adjacent_sigma(lowerCAmelCase , lowerCAmelCase ).to(sample.device )
__lowercase= torch.zeros_like(lowerCAmelCase )
__lowercase= (sigma**2 - adjacent_sigma**2) ** 0.5
# equation 6 in the paper: the model_output modeled by the network is grad_x log pt(x)
# also equation 47 shows the analog from SDE models to ancestral sampling methods
__lowercase= diffusion.flatten()
while len(diffusion.shape ) < len(sample.shape ):
__lowercase= diffusion.unsqueeze(-1 )
__lowercase= drift - diffusion**2 * model_output
# equation 6: sample noise for the diffusion term of
__lowercase= randn_tensor(
sample.shape , layout=sample.layout , generator=lowerCAmelCase , device=sample.device , dtype=sample.dtype )
__lowercase= sample - drift # subtract because `dt` is a small negative timestep
# TODO is the variable diffusion the correct scaling term for the noise?
__lowercase= prev_sample_mean + diffusion * noise # add impact of diffusion field g
if not return_dict:
return (prev_sample, prev_sample_mean)
return SdeVeOutput(prev_sample=lowerCAmelCase , prev_sample_mean=lowerCAmelCase )
def _A (self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase = None , lowerCAmelCase = True , ):
if self.timesteps is None:
raise ValueError(
'`self.timesteps` is not set, you need to run \'set_timesteps\' after creating the scheduler' )
# For small batch sizes, the paper "suggest replacing norm(z) with sqrt(d), where d is the dim. of z"
# sample noise for correction
__lowercase= randn_tensor(sample.shape , layout=sample.layout , generator=lowerCAmelCase ).to(sample.device )
# compute step size from the model_output, the noise, and the snr
__lowercase= torch.norm(model_output.reshape(model_output.shape[0] , -1 ) , dim=-1 ).mean()
__lowercase= torch.norm(noise.reshape(noise.shape[0] , -1 ) , dim=-1 ).mean()
__lowercase= (self.config.snr * noise_norm / grad_norm) ** 2 * 2
__lowercase= step_size * torch.ones(sample.shape[0] ).to(sample.device )
# self.repeat_scalar(step_size, sample.shape[0])
# compute corrected sample: model_output term and noise term
__lowercase= step_size.flatten()
while len(step_size.shape ) < len(sample.shape ):
__lowercase= step_size.unsqueeze(-1 )
__lowercase= sample + step_size * model_output
__lowercase= prev_sample_mean + ((step_size * 2) ** 0.5) * noise
if not return_dict:
return (prev_sample,)
return SchedulerOutput(prev_sample=lowerCAmelCase )
def _A (self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , ):
# Make sure sigmas and timesteps have the same device and dtype as original_samples
__lowercase= timesteps.to(original_samples.device )
__lowercase= self.discrete_sigmas.to(original_samples.device )[timesteps]
__lowercase= (
noise * sigmas[:, None, None, None]
if noise is not None
else torch.randn_like(lowerCAmelCase ) * sigmas[:, None, None, None]
)
__lowercase= noise + original_samples
return noisy_samples
def __len__(self ):
return self.config.num_train_timesteps
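# Hedged sketch of the geometric sigma schedule set_sigmas builds above:
# sigma(t) = sigma_min * (sigma_max / sigma_min) ** t over descending t.
_sigma_min, _sigma_max = 0.01, 13_48.0
_ts = torch.linspace(1, 1E-5, 5)
_sigmas = _sigma_min * (_sigma_max / _sigma_min) ** _ts
assert torch.isclose(_sigmas[0], torch.tensor(_sigma_max)).item()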
| 304
| 0
|
import argparse
import shutil
from pathlib import Path
from tqdm import tqdm
from transformers import AutoTokenizer
def pack_examples(tok, src_examples, tgt_examples, max_tokens=1_0_2_4):
    '''simple docstring'''
    finished_src, finished_tgt = [], []
    sorted_examples = list(zip(src_examples, tgt_examples))
    new_src, new_tgt = sorted_examples[0]
    def is_too_big(contents):
        return tok(contents, return_tensors='pt').input_ids.shape[1] > max_tokens
    for src, tgt in tqdm(sorted_examples[1:]):
        cand_src = new_src + ' ' + src
        cand_tgt = new_tgt + ' ' + tgt
        if is_too_big(cand_src) or is_too_big(cand_tgt):  # cant fit, finalize example
            finished_src.append(new_src)
            finished_tgt.append(new_tgt)
            new_src, new_tgt = src, tgt
        else:  # can fit, keep adding
            new_src, new_tgt = cand_src, cand_tgt
    # cleanup
    if new_src:
        assert new_tgt
        finished_src.append(new_src)
        finished_tgt.append(new_tgt)
    return finished_src, finished_tgt
def pack_data_dir(tok, data_dir, max_tokens, save_path):
    '''simple docstring'''
    save_path = Path(save_path)
    save_path.mkdir(exist_ok=True)
    for split in ["train"]:
        src_path, tgt_path = data_dir / f'{split}.source', data_dir / f'{split}.target'
        src_docs = [x.rstrip() for x in Path(src_path).open().readlines()]
        tgt_docs = [x.rstrip() for x in Path(tgt_path).open().readlines()]
        packed_src, packed_tgt = pack_examples(tok, src_docs, tgt_docs, max_tokens)
        print(f'packed {split} split from {len(src_docs)} examples -> {len(packed_src)}.')
        Path(save_path / f'{split}.source').open('w').write('\n'.join(packed_src))
        Path(save_path / f'{split}.target').open('w').write('\n'.join(packed_tgt))
    for split in ["val", "test"]:
        src_path, tgt_path = data_dir / f'{split}.source', data_dir / f'{split}.target'
        shutil.copyfile(src_path, save_path / f'{split}.source')
        shutil.copyfile(tgt_path, save_path / f'{split}.target')
def packer_cli():
    '''simple docstring'''
    parser = argparse.ArgumentParser()
    parser.add_argument('--tok_name', type=str, help='like facebook/bart-large-cnn,t5-base, etc.')
    parser.add_argument('--max_seq_len', type=int, default=1_2_8)
    parser.add_argument('--data_dir', type=str)
    parser.add_argument('--save_path', type=str)
    args = parser.parse_args()
    tokenizer = AutoTokenizer.from_pretrained(args.tok_name)
    return pack_data_dir(tokenizer, Path(args.data_dir), args.max_seq_len, args.save_path)
if __name__ == "__main__":
packer_cli()
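    # Hedged sketch of the greedy packing idea above, with character length
    # standing in for the tokenizer-based length check (an assumption).
    def _pack(lines, max_len):
        packed, cur = [], ''
        for line in lines:
            cand = (cur + ' ' + line).strip()
            if cur and len(cand) > max_len:
                packed.append(cur)
                cur = line
            else:
                cur = cand
        if cur:
            packed.append(cur)
        return packed
    assert _pack(['aa', 'bb', 'cc'], 5) == ['aa bb', 'cc']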
| 361
|
import gc
import tempfile
import unittest
import numpy as np
import torch
from diffusers import VersatileDiffusionPipeline
from diffusers.utils.testing_utils import load_image, nightly, require_torch_gpu, torch_device
lowerCAmelCase = False
class A ( unittest.TestCase ):
pass
@nightly
@require_torch_gpu
class A ( unittest.TestCase ):
def _A (self ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def _A (self ):
__lowercase= VersatileDiffusionPipeline.from_pretrained('shi-labs/versatile-diffusion' , torch_dtype=torch.floataa )
pipe.to(lowerCAmelCase )
pipe.set_progress_bar_config(disable=lowerCAmelCase )
__lowercase= load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg' )
__lowercase= torch.manual_seed(0 )
__lowercase= pipe.dual_guided(
prompt='first prompt' , image=lowerCAmelCase , text_to_image_strength=0.75 , generator=lowerCAmelCase , guidance_scale=7.5 , num_inference_steps=2 , output_type='numpy' , ).images
with tempfile.TemporaryDirectory() as tmpdirname:
pipe.save_pretrained(lowerCAmelCase )
__lowercase= VersatileDiffusionPipeline.from_pretrained(lowerCAmelCase , torch_dtype=torch.floataa )
pipe.to(lowerCAmelCase )
pipe.set_progress_bar_config(disable=lowerCAmelCase )
__lowercase= generator.manual_seed(0 )
__lowercase= pipe.dual_guided(
prompt='first prompt' , image=lowerCAmelCase , text_to_image_strength=0.75 , generator=lowerCAmelCase , guidance_scale=7.5 , num_inference_steps=2 , output_type='numpy' , ).images
assert np.abs(image - new_image ).sum() < 1E-5, "Models don't have the same forward pass"
def _A (self ):
__lowercase= VersatileDiffusionPipeline.from_pretrained('shi-labs/versatile-diffusion' , torch_dtype=torch.floataa )
pipe.to(lowerCAmelCase )
pipe.set_progress_bar_config(disable=lowerCAmelCase )
__lowercase= 'cyberpunk 2077'
__lowercase= load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg' )
__lowercase= torch.manual_seed(0 )
__lowercase= pipe.dual_guided(
prompt=lowerCAmelCase , image=lowerCAmelCase , text_to_image_strength=0.75 , generator=lowerCAmelCase , guidance_scale=7.5 , num_inference_steps=5_0 , output_type='numpy' , ).images
__lowercase= image[0, 2_5_3:2_5_6, 2_5_3:2_5_6, -1]
assert image.shape == (1, 5_1_2, 5_1_2, 3)
__lowercase= np.array([0.14_48, 0.16_19, 0.17_41, 0.10_86, 0.11_47, 0.11_28, 0.11_99, 0.11_65, 0.10_01] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1
__lowercase= 'A painting of a squirrel eating a burger '
__lowercase= torch.manual_seed(0 )
__lowercase= pipe.text_to_image(
prompt=lowerCAmelCase , generator=lowerCAmelCase , guidance_scale=7.5 , num_inference_steps=5_0 , output_type='numpy' ).images
__lowercase= image[0, 2_5_3:2_5_6, 2_5_3:2_5_6, -1]
assert image.shape == (1, 5_1_2, 5_1_2, 3)
__lowercase= np.array([0.33_67, 0.31_69, 0.26_56, 0.38_70, 0.47_90, 0.37_96, 0.40_09, 0.48_78, 0.47_78] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1
__lowercase= pipe.image_variation(lowerCAmelCase , generator=lowerCAmelCase , output_type='numpy' ).images
__lowercase= image[0, 2_5_3:2_5_6, 2_5_3:2_5_6, -1]
assert image.shape == (1, 5_1_2, 5_1_2, 3)
__lowercase= np.array([0.30_76, 0.31_23, 0.32_84, 0.37_82, 0.37_70, 0.38_94, 0.42_97, 0.43_31, 0.44_56] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1
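# Hedged sketch of the slice-comparison pattern used throughout these tests:
# compare a small corner of the output against a stored reference slice.
_img = np.zeros((1, 5_1_2, 5_1_2, 3), dtype=np.float32)
_corner = _img[0, 2_5_3:2_5_6, 2_5_3:2_5_6, -1]
assert np.abs(_corner.flatten() - np.zeros(9, dtype=np.float32)).max() < 1E-1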
| 304
| 0
|
def excel_title_to_column(column_title: str) -> int:
    '''simple docstring'''
    assert column_title.isupper()
    answer = 0
    index = len(column_title) - 1
    power = 0
    while index >= 0:
        value = (ord(column_title[index]) - 6_4) * pow(2_6, power)
        answer += value
        power += 1
        index -= 1
    return answer
if __name__ == "__main__":
from doctest import testmod
testmod()
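    # Hedged alternative (our helper name): the same conversion via Horner's rule.
    def title_to_number(title: str) -> int:
        number = 0
        for char in title:
            number = number * 2_6 + (ord(char) - ord('A') + 1)
        return number
    assert title_to_number('AB') == 2_8
    assert title_to_number('Z') == 2_6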
| 362
|
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
lowerCAmelCase = {
'''configuration_xmod''': [
'''XMOD_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''XmodConfig''',
'''XmodOnnxConfig''',
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase = [
'''XMOD_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''XmodForCausalLM''',
'''XmodForMaskedLM''',
'''XmodForMultipleChoice''',
'''XmodForQuestionAnswering''',
'''XmodForSequenceClassification''',
'''XmodForTokenClassification''',
'''XmodModel''',
'''XmodPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_xmod import XMOD_PRETRAINED_CONFIG_ARCHIVE_MAP, XmodConfig, XmodOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xmod import (
XMOD_PRETRAINED_MODEL_ARCHIVE_LIST,
XmodForCausalLM,
XmodForMaskedLM,
XmodForMultipleChoice,
XmodForQuestionAnswering,
XmodForSequenceClassification,
XmodForTokenClassification,
XmodModel,
XmodPreTrainedModel,
)
else:
import sys
lowerCAmelCase = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
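# Hedged sketch (our own toy class, not transformers' _LazyModule) of the
# lazy-import pattern above: attribute access triggers the real import.
import importlib
class _ToyLazyModule:
    def __init__(self, name_to_module):
        self._map = name_to_module
    def __getattr__(self, name):
        module = importlib.import_module(self._map[name])
        return getattr(module, name)
_lazy = _ToyLazyModule({'sqrt': 'math'})
assert _lazy.sqrt(9) == 3.0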
| 304
| 0
|
"""simple docstring"""
from .pipelines import DiffusionPipeline, ImagePipelineOutput # noqa: F401
from .utils import deprecate
deprecate(
'''pipelines_utils''',
'''0.22.0''',
'''Importing `DiffusionPipeline` or `ImagePipelineOutput` from diffusers.pipeline_utils is deprecated. Please import from diffusers.pipelines.pipeline_utils instead.''',
standard_warn=False,
stacklevel=3,
)
| 363
|
import math
from datetime import datetime, timedelta
def gauss_easter(year: int) -> datetime:
    '''simple docstring'''
    metonic_cycle = year % 1_9
    julian_leap_year = year % 4
    non_leap_year = year % 7
    leap_day_inhibits = math.floor(year / 1_0_0)
    lunar_orbit_correction = math.floor((1_3 + 8 * leap_day_inhibits) / 2_5)
    leap_day_reinstall_number = leap_day_inhibits // 4  # floor division, per Gauss's correction term
    secular_moon_shift = (
        1_5 - lunar_orbit_correction + leap_day_inhibits - leap_day_reinstall_number
    ) % 3_0
    century_starting_point = (4 + leap_day_inhibits - leap_day_reinstall_number) % 7
    # days to be added to March 21
    days_to_add = (1_9 * metonic_cycle + secular_moon_shift) % 3_0
    # PHM -> Paschal Full Moon
    days_from_phm_to_sunday = (
        2 * julian_leap_year
        + 4 * non_leap_year
        + 6 * days_to_add
        + century_starting_point
    ) % 7
    if days_to_add == 2_9 and days_from_phm_to_sunday == 6:
        return datetime(year, 4, 1_9)
    elif days_to_add == 2_8 and days_from_phm_to_sunday == 6:
        return datetime(year, 4, 1_8)
    else:
        return datetime(year, 3, 2_2) + timedelta(
            days=int(days_to_add + days_from_phm_to_sunday))
if __name__ == "__main__":
for year in (1_9_9_4, 2_0_0_0, 2_0_1_0, 2_0_2_1, 2_0_2_3):
        tense = '''will be''' if year > datetime.now().year else '''was'''
print(F'Easter in {year} {tense} {gauss_easter(year)}')
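    # Hedged sanity checks against published Gregorian Easter dates (assumed):
    assert gauss_easter(2_0_0_0) == datetime(2_0_0_0, 4, 2_3)
    assert gauss_easter(1_9_9_4) == datetime(1_9_9_4, 4, 3)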
| 304
| 0
|
from __future__ import annotations
from collections import deque
class Automaton:
    def __init__(self, keywords: list[str]):
        self.adlist: list[dict] = []
        self.adlist.append(
            {'value': '', 'next_states': [], 'fail_state': 0, 'output': []})
        for keyword in keywords:
            self.add_keyword(keyword)
        self.set_fail_transitions()
    def find_next_state(self, current_state: int, char: str) -> int | None:
        for state in self.adlist[current_state]["next_states"]:
            if char == self.adlist[state]["value"]:
                return state
        return None
    def add_keyword(self, keyword: str) -> None:
        current_state = 0
        for character in keyword:
            next_state = self.find_next_state(current_state, character)
            if next_state is None:
                self.adlist.append(
                    {
                        'value': character,
                        'next_states': [],
                        'fail_state': 0,
                        'output': [],
                    })
                self.adlist[current_state]["next_states"].append(len(self.adlist) - 1)
                current_state = len(self.adlist) - 1
            else:
                current_state = next_state
        self.adlist[current_state]["output"].append(keyword)
    def set_fail_transitions(self) -> None:
        q: deque = deque()
        for node in self.adlist[0]["next_states"]:
            q.append(node)
            self.adlist[node]["fail_state"] = 0
        while q:
            r = q.popleft()
            for child in self.adlist[r]["next_states"]:
                q.append(child)
                state = self.adlist[r]['fail_state']
                while (
                    self.find_next_state(state, self.adlist[child]['value']) is None
                    and state != 0
                ):
                    state = self.adlist[state]['fail_state']
                self.adlist[child]["fail_state"] = self.find_next_state(
                    state, self.adlist[child]['value'])
                if self.adlist[child]["fail_state"] is None:
                    self.adlist[child]["fail_state"] = 0
                self.adlist[child]["output"] = (
                    self.adlist[child]['output']
                    + self.adlist[self.adlist[child]['fail_state']]['output']
                )
    def search_in(self, string: str) -> dict[str, list[int]]:
        result: dict = {}  # returns a dict with keywords and list of its occurrences
        current_state = 0
        for i in range(len(string)):
            while (
                self.find_next_state(current_state, string[i]) is None
                and current_state != 0
            ):
                current_state = self.adlist[current_state]['fail_state']
            next_state = self.find_next_state(current_state, string[i])
            if next_state is None:
                current_state = 0
            else:
                current_state = next_state
                for key in self.adlist[current_state]["output"]:
                    if key not in result:
                        result[key] = []
                    result[key].append(i - len(key) + 1)
        return result
if __name__ == "__main__":
import doctest
doctest.testmod()
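    # Hedged cross-check (our naive helper) of the automaton's output format:
    # a mapping from keyword to the list of match start indices.
    def _naive_search(text, keywords):
        hits = {}
        for kw in keywords:
            start = text.find(kw)
            while start != -1:
                hits.setdefault(kw, []).append(start)
                start = text.find(kw, start + 1)
        return hits
    assert _naive_search('abccba', ['ab', 'cc']) == {'ab': [0], 'cc': [2]}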
| 364
|
from collections import OrderedDict
from typing import Any, Mapping, Optional
from ... import PreTrainedTokenizer
from ...configuration_utils import PretrainedConfig
from ...file_utils import TensorType, is_torch_available
from ...onnx import OnnxConfig, OnnxConfigWithPast, OnnxSeqaSeqConfigWithPast
from ...onnx.utils import compute_effective_axis_dimension
from ...utils import logging
lowerCAmelCase = logging.get_logger(__name__)
lowerCAmelCase = {
'''facebook/blenderbot_small-90M''': '''https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/config.json''',
# See all BlenderbotSmall models at https://huggingface.co/models?filter=blenderbot_small
}
class A ( A_ ):
UpperCamelCase_ : Optional[int] ='''blenderbot-small'''
UpperCamelCase_ : Optional[Any] =['''past_key_values''']
UpperCamelCase_ : Optional[int] ={'''num_attention_heads''': '''encoder_attention_heads''', '''hidden_size''': '''d_model'''}
def __init__(self , lowerCAmelCase=5_0_2_6_5 , lowerCAmelCase=5_1_2 , lowerCAmelCase=8 , lowerCAmelCase=2_0_4_8 , lowerCAmelCase=1_6 , lowerCAmelCase=8 , lowerCAmelCase=2_0_4_8 , lowerCAmelCase=1_6 , lowerCAmelCase=0.0 , lowerCAmelCase=0.0 , lowerCAmelCase=True , lowerCAmelCase=True , lowerCAmelCase="gelu" , lowerCAmelCase=5_1_2 , lowerCAmelCase=0.1 , lowerCAmelCase=0.0 , lowerCAmelCase=0.0 , lowerCAmelCase=0.02 , lowerCAmelCase=1 , lowerCAmelCase=False , lowerCAmelCase=0 , lowerCAmelCase=1 , lowerCAmelCase=2 , lowerCAmelCase=2 , **lowerCAmelCase , ):
__lowercase= vocab_size
__lowercase= max_position_embeddings
__lowercase= d_model
__lowercase= encoder_ffn_dim
__lowercase= encoder_layers
__lowercase= encoder_attention_heads
__lowercase= decoder_ffn_dim
__lowercase= decoder_layers
__lowercase= decoder_attention_heads
__lowercase= dropout
__lowercase= attention_dropout
__lowercase= activation_dropout
__lowercase= activation_function
__lowercase= init_std
__lowercase= encoder_layerdrop
__lowercase= decoder_layerdrop
__lowercase= use_cache
__lowercase= encoder_layers
__lowercase= scale_embedding # scale factor will be sqrt(d_model) if True
super().__init__(
pad_token_id=lowerCAmelCase , bos_token_id=lowerCAmelCase , eos_token_id=lowerCAmelCase , is_encoder_decoder=lowerCAmelCase , decoder_start_token_id=lowerCAmelCase , forced_eos_token_id=lowerCAmelCase , **lowerCAmelCase , )
class A ( A_ ):
@property
def _A (self ):
if self.task in ["default", "seq2seq-lm"]:
__lowercase= OrderedDict(
[
('input_ids', {0: 'batch', 1: 'encoder_sequence'}),
('attention_mask', {0: 'batch', 1: 'encoder_sequence'}),
] )
if self.use_past:
__lowercase= {0: 'batch'}
__lowercase= {0: 'batch', 1: 'past_decoder_sequence + sequence'}
else:
__lowercase= {0: 'batch', 1: 'decoder_sequence'}
__lowercase= {0: 'batch', 1: 'decoder_sequence'}
if self.use_past:
self.fill_with_past_key_values_(lowerCAmelCase , direction='inputs' )
elif self.task == "causal-lm":
# TODO: figure this case out.
__lowercase= OrderedDict(
[
('input_ids', {0: 'batch', 1: 'encoder_sequence'}),
('attention_mask', {0: 'batch', 1: 'encoder_sequence'}),
] )
if self.use_past:
__lowercase, __lowercase= self.num_layers
for i in range(lowerCAmelCase ):
__lowercase= {0: 'batch', 2: 'past_sequence + sequence'}
__lowercase= {0: 'batch', 2: 'past_sequence + sequence'}
else:
__lowercase= OrderedDict(
[
('input_ids', {0: 'batch', 1: 'encoder_sequence'}),
('attention_mask', {0: 'batch', 1: 'encoder_sequence'}),
('decoder_input_ids', {0: 'batch', 1: 'decoder_sequence'}),
('decoder_attention_mask', {0: 'batch', 1: 'decoder_sequence'}),
] )
return common_inputs
@property
def _A (self ):
if self.task in ["default", "seq2seq-lm"]:
__lowercase= super().outputs
else:
__lowercase= super(lowerCAmelCase , self ).outputs
if self.use_past:
__lowercase, __lowercase= self.num_layers
for i in range(lowerCAmelCase ):
__lowercase= {0: 'batch', 2: 'past_sequence + sequence'}
__lowercase= {0: 'batch', 2: 'past_sequence + sequence'}
return common_outputs
def _A (self , lowerCAmelCase , lowerCAmelCase = -1 , lowerCAmelCase = -1 , lowerCAmelCase = False , lowerCAmelCase = None , ):
__lowercase= self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase )
# Generate decoder inputs
__lowercase= seq_length if not self.use_past else 1
__lowercase= self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase )
__lowercase= {f'decoder_{name}': tensor for name, tensor in decoder_inputs.items()}
__lowercase= dict(**lowerCAmelCase , **lowerCAmelCase )
if self.use_past:
if not is_torch_available():
raise ValueError('Cannot generate dummy past_keys inputs without PyTorch installed.' )
else:
import torch
__lowercase, __lowercase= common_inputs['input_ids'].shape
__lowercase= common_inputs['decoder_input_ids'].shape[1]
__lowercase, __lowercase= self.num_attention_heads
__lowercase= (
batch,
num_encoder_attention_heads,
encoder_seq_length,
self._config.hidden_size // num_encoder_attention_heads,
)
__lowercase= decoder_seq_length + 3
__lowercase= (
batch,
num_decoder_attention_heads,
decoder_past_length,
self._config.hidden_size // num_decoder_attention_heads,
)
__lowercase= torch.cat(
[common_inputs['decoder_attention_mask'], torch.ones(lowerCAmelCase , lowerCAmelCase )] , dim=1 )
__lowercase= []
# If the number of encoder and decoder layers are present in the model configuration, both are considered
__lowercase, __lowercase= self.num_layers
__lowercase= min(lowerCAmelCase , lowerCAmelCase )
__lowercase= max(lowerCAmelCase , lowerCAmelCase ) - min_num_layers
__lowercase= 'encoder' if num_encoder_layers > num_decoder_layers else 'decoder'
for _ in range(lowerCAmelCase ):
common_inputs["past_key_values"].append(
(
torch.zeros(lowerCAmelCase ),
torch.zeros(lowerCAmelCase ),
torch.zeros(lowerCAmelCase ),
torch.zeros(lowerCAmelCase ),
) )
# TODO: test this.
__lowercase= encoder_shape if remaining_side_name == 'encoder' else decoder_shape
for _ in range(lowerCAmelCase , lowerCAmelCase ):
common_inputs["past_key_values"].append((torch.zeros(lowerCAmelCase ), torch.zeros(lowerCAmelCase )) )
return common_inputs
def _A (self , lowerCAmelCase , lowerCAmelCase = -1 , lowerCAmelCase = -1 , lowerCAmelCase = False , lowerCAmelCase = None , ):
__lowercase= self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase )
if self.use_past:
if not is_torch_available():
raise ValueError('Cannot generate dummy past_keys inputs without PyTorch installed.' )
else:
import torch
__lowercase, __lowercase= common_inputs['input_ids'].shape
# Not using the same length for past_key_values
__lowercase= seqlen + 2
__lowercase, __lowercase= self.num_layers
__lowercase, __lowercase= self.num_attention_heads
__lowercase= (
batch,
num_encoder_attention_heads,
past_key_values_length,
self._config.hidden_size // num_encoder_attention_heads,
)
__lowercase= common_inputs['attention_mask'].dtype
__lowercase= torch.cat(
[common_inputs['attention_mask'], torch.ones(lowerCAmelCase , lowerCAmelCase , dtype=lowerCAmelCase )] , dim=1 )
__lowercase= [
(torch.zeros(lowerCAmelCase ), torch.zeros(lowerCAmelCase )) for _ in range(lowerCAmelCase )
]
return common_inputs
def _A (self , lowerCAmelCase , lowerCAmelCase = -1 , lowerCAmelCase = -1 , lowerCAmelCase = False , lowerCAmelCase = None , ):
# Copied from OnnxConfig.generate_dummy_inputs
# Did not use super(OnnxConfigWithPast, self).generate_dummy_inputs for code clarity.
# If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
__lowercase= compute_effective_axis_dimension(
lowerCAmelCase , fixed_dimension=OnnxConfig.default_fixed_batch , num_token_to_add=0 )
# If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
__lowercase= tokenizer.num_special_tokens_to_add(lowerCAmelCase )
__lowercase= compute_effective_axis_dimension(
lowerCAmelCase , fixed_dimension=OnnxConfig.default_fixed_sequence , num_token_to_add=lowerCAmelCase )
# Generate dummy inputs according to compute batch and sequence
__lowercase= [' '.join([tokenizer.unk_token] ) * seq_length] * batch_size
__lowercase= dict(tokenizer(lowerCAmelCase , return_tensors=lowerCAmelCase ) )
return common_inputs
def _A (self , lowerCAmelCase , lowerCAmelCase = -1 , lowerCAmelCase = -1 , lowerCAmelCase = False , lowerCAmelCase = None , ):
if self.task in ["default", "seq2seq-lm"]:
__lowercase= self._generate_dummy_inputs_for_default_and_seqaseq_lm(
lowerCAmelCase , batch_size=lowerCAmelCase , seq_length=lowerCAmelCase , is_pair=lowerCAmelCase , framework=lowerCAmelCase )
elif self.task == "causal-lm":
__lowercase= self._generate_dummy_inputs_for_causal_lm(
lowerCAmelCase , batch_size=lowerCAmelCase , seq_length=lowerCAmelCase , is_pair=lowerCAmelCase , framework=lowerCAmelCase )
else:
__lowercase= self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
lowerCAmelCase , batch_size=lowerCAmelCase , seq_length=lowerCAmelCase , is_pair=lowerCAmelCase , framework=lowerCAmelCase )
return common_inputs
def _A (self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase ):
if self.task in ["default", "seq2seq-lm"]:
__lowercase= super()._flatten_past_key_values_(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase )
else:
__lowercase= super(lowerCAmelCase , self )._flatten_past_key_values_(
lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase )
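# Hedged sketch (sizes are illustrative) of one dummy past_key_values entry
# generated above: a (key, value) pair of zero tensors shaped
# (batch, num_heads, past_sequence_length, head_dim).
import torch as _torch
_shape = (2, 8, 5, 6_4)
_past_entry = (_torch.zeros(_shape), _torch.zeros(_shape))
assert _past_entry[0].shape == _torch.Size(_shape)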
| 304
| 0
|
def harmonic_series(n_term: str) -> list:
    '''simple docstring'''
    if n_term == "":
        return []
    series: list = []
    for temp in range(int(n_term)):
        series.append(f'1/{temp + 1}' if series else '1')
    return series
if __name__ == "__main__":
    nth_term = input('''Enter the last number (nth term) of the Harmonic Series''')
print('''Formula of Harmonic Series => 1+1/2+1/3 ..... 1/n''')
print(harmonic_series(nth_term))
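    # Hedged check with exact fractions: 1 + 1/2 + 1/3 == 11/6.
    from fractions import Fraction
    assert sum(Fraction(term) for term in harmonic_series('3')) == Fraction(1_1, 6)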
| 365
|
from math import factorial, radians
def sin(angle_in_degrees: float, accuracy: int = 1_8, rounded_values_count: int = 1_0) -> float:
    '''simple docstring'''
    # wrap the angle into [0, 360) before converting
    angle_in_degrees = angle_in_degrees - ((angle_in_degrees // 360.0) * 360.0)
    # Converting from degrees to radians
    angle_in_radians = radians(angle_in_degrees)
    result = angle_in_radians
    a = 3
    b = -1
    for _ in range(accuracy):
        result += (b * (angle_in_radians**a)) / factorial(a)
        b = -b  # One positive term and the next will be negative and so on...
        a += 2  # Increased by 2 for every term.
    return round(result, rounded_values_count)
if __name__ == "__main__":
__import__('''doctest''').testmod()
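    # Hedged checks (the function name above is our reconstruction): the
    # Maclaurin series should match the library sine very closely.
    from math import isclose, sin as _libsin
    assert isclose(sin(3_0), 0.5, abs_tol=1E-9)
    assert isclose(sin(9_0), _libsin(radians(9_0)), abs_tol=1E-9)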
| 304
| 0
|
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_squeezebert import SqueezeBertTokenizer
lowerCAmelCase = logging.get_logger(__name__)
lowerCAmelCase = {'vocab_file': 'vocab.txt', 'tokenizer_file': 'tokenizer.json'}
lowerCAmelCase = {
'vocab_file': {
'squeezebert/squeezebert-uncased': (
'https://huggingface.co/squeezebert/squeezebert-uncased/resolve/main/vocab.txt'
),
'squeezebert/squeezebert-mnli': 'https://huggingface.co/squeezebert/squeezebert-mnli/resolve/main/vocab.txt',
'squeezebert/squeezebert-mnli-headless': (
'https://huggingface.co/squeezebert/squeezebert-mnli-headless/resolve/main/vocab.txt'
),
},
'tokenizer_file': {
'squeezebert/squeezebert-uncased': (
'https://huggingface.co/squeezebert/squeezebert-uncased/resolve/main/tokenizer.json'
),
'squeezebert/squeezebert-mnli': (
'https://huggingface.co/squeezebert/squeezebert-mnli/resolve/main/tokenizer.json'
),
'squeezebert/squeezebert-mnli-headless': (
'https://huggingface.co/squeezebert/squeezebert-mnli-headless/resolve/main/tokenizer.json'
),
},
}
lowerCAmelCase = {
'squeezebert/squeezebert-uncased': 5_1_2,
'squeezebert/squeezebert-mnli': 5_1_2,
'squeezebert/squeezebert-mnli-headless': 5_1_2,
}
PRETRAINED_INIT_CONFIGURATION = {
'squeezebert/squeezebert-uncased': {'do_lower_case': True},
'squeezebert/squeezebert-mnli': {'do_lower_case': True},
'squeezebert/squeezebert-mnli-headless': {'do_lower_case': True},
}
class SqueezeBertTokenizerFast ( PreTrainedTokenizerFast ):
    vocab_files_names =VOCAB_FILES_NAMES
    pretrained_vocab_files_map =PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration =PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes =PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class =SqueezeBertTokenizer
    def __init__(self , vocab_file=None , tokenizer_file=None , do_lower_case=True , unk_token="[UNK]" , sep_token="[SEP]" , pad_token="[PAD]" , cls_token="[CLS]" , mask_token="[MASK]" , tokenize_chinese_chars=True , strip_accents=None , **kwargs , ):
        super().__init__(
            vocab_file , tokenizer_file=tokenizer_file , do_lower_case=do_lower_case , unk_token=unk_token , sep_token=sep_token , pad_token=pad_token , cls_token=cls_token , mask_token=mask_token , tokenize_chinese_chars=tokenize_chinese_chars , strip_accents=strip_accents , **kwargs , )
        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
        if (
            normalizer_state.get('lowercase' , do_lower_case ) != do_lower_case
            or normalizer_state.get('strip_accents' , strip_accents ) != strip_accents
            or normalizer_state.get('handle_chinese_chars' , tokenize_chinese_chars ) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers , normalizer_state.pop('type' ) )
            normalizer_state['lowercase'] = do_lower_case
            normalizer_state['strip_accents'] = strip_accents
            normalizer_state['handle_chinese_chars'] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state )
        self.do_lower_case = do_lower_case
    def build_inputs_with_special_tokens(self , token_ids_0 , token_ids_1=None ):
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]
        return output
    def create_token_type_ids_from_sequences(self , token_ids_0 , token_ids_1 = None ):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep ) * [0]
        return len(cls + token_ids_0 + sep ) * [0] + len(token_ids_1 + sep ) * [1]
    def save_vocabulary(self , save_directory , filename_prefix = None ):
        files = self._tokenizer.model.save(save_directory , name=filename_prefix )
        return tuple(files )
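# For reference, create_token_type_ids_from_sequences above follows the standard BERT
# segment layout: a single sequence [CLS] A [SEP] is all 0s, while a pair
# [CLS] A [SEP] B [SEP] gets 0s over the first segment and 1s over the second.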
| 366
|
lowerCAmelCase = [
9_9_9,
8_0_0,
7_9_9,
6_0_0,
5_9_9,
5_0_0,
4_0_0,
3_9_9,
3_7_7,
3_5_5,
3_3_3,
3_1_1,
2_8_8,
2_6_6,
2_4_4,
2_2_2,
2_0_0,
1_9_9,
1_7_7,
1_5_5,
1_3_3,
1_1_1,
8_8,
6_6,
4_4,
2_2,
0,
]
lowerCAmelCase = [
9_9_9,
9_7_6,
9_5_2,
9_2_8,
9_0_5,
8_8_2,
8_5_8,
8_5_7,
8_1_0,
7_6_2,
7_1_5,
7_1_4,
5_7_2,
4_2_9,
4_2_8,
2_8_6,
2_8_5,
2_3_8,
1_9_0,
1_4_3,
1_4_2,
1_1_8,
9_5,
7_1,
4_7,
2_4,
0,
]
lowerCAmelCase = [
9_9_9,
9_8_8,
9_7_7,
9_6_6,
9_5_5,
9_4_4,
9_3_3,
9_2_2,
9_1_1,
9_0_0,
8_9_9,
8_7_9,
8_5_9,
8_4_0,
8_2_0,
8_0_0,
7_9_9,
7_6_6,
7_3_3,
7_0_0,
6_9_9,
6_5_0,
6_0_0,
5_9_9,
5_0_0,
4_9_9,
4_0_0,
3_9_9,
3_5_0,
3_0_0,
2_9_9,
2_6_6,
2_3_3,
2_0_0,
1_9_9,
1_7_9,
1_5_9,
1_4_0,
1_2_0,
1_0_0,
9_9,
8_8,
7_7,
6_6,
5_5,
4_4,
3_3,
2_2,
1_1,
0,
]
lowerCAmelCase = [
9_9_9,
9_9_5,
9_9_2,
9_8_9,
9_8_5,
9_8_1,
9_7_8,
9_7_5,
9_7_1,
9_6_7,
9_6_4,
9_6_1,
9_5_7,
9_5_6,
9_5_1,
9_4_7,
9_4_2,
9_3_7,
9_3_3,
9_2_8,
9_2_3,
9_1_9,
9_1_4,
9_1_3,
9_0_8,
9_0_3,
8_9_7,
8_9_2,
8_8_7,
8_8_1,
8_7_6,
8_7_1,
8_7_0,
8_6_4,
8_5_8,
8_5_2,
8_4_6,
8_4_0,
8_3_4,
8_2_8,
8_2_7,
8_2_0,
8_1_3,
8_0_6,
7_9_9,
7_9_2,
7_8_5,
7_8_4,
7_7_7,
7_7_0,
7_6_3,
7_5_6,
7_4_9,
7_4_2,
7_4_1,
7_3_3,
7_2_4,
7_1_6,
7_0_7,
6_9_9,
6_9_8,
6_8_8,
6_7_7,
6_6_6,
6_5_6,
6_5_5,
6_4_5,
6_3_4,
6_2_3,
6_1_3,
6_1_2,
5_9_8,
5_8_4,
5_7_0,
5_6_9,
5_5_5,
5_4_1,
5_2_7,
5_2_6,
5_0_5,
4_8_4,
4_8_3,
4_6_2,
4_4_0,
4_3_9,
3_9_6,
3_9_5,
3_5_2,
3_5_1,
3_0_8,
3_0_7,
2_6_4,
2_6_3,
2_2_0,
2_1_9,
1_7_6,
1_3_2,
8_8,
4_4,
0,
]
lowerCAmelCase = [
9_9_9,
9_9_7,
9_9_5,
9_9_2,
9_9_0,
9_8_8,
9_8_6,
9_8_4,
9_8_1,
9_7_9,
9_7_7,
9_7_5,
9_7_2,
9_7_0,
9_6_8,
9_6_6,
9_6_4,
9_6_1,
9_5_9,
9_5_7,
9_5_6,
9_5_4,
9_5_1,
9_4_9,
9_4_6,
9_4_4,
9_4_1,
9_3_9,
9_3_6,
9_3_4,
9_3_1,
9_2_9,
9_2_6,
9_2_4,
9_2_1,
9_1_9,
9_1_6,
9_1_4,
9_1_3,
9_1_0,
9_0_7,
9_0_5,
9_0_2,
8_9_9,
8_9_6,
8_9_3,
8_9_1,
8_8_8,
8_8_5,
8_8_2,
8_7_9,
8_7_7,
8_7_4,
8_7_1,
8_7_0,
8_6_7,
8_6_4,
8_6_1,
8_5_8,
8_5_5,
8_5_2,
8_4_9,
8_4_6,
8_4_3,
8_4_0,
8_3_7,
8_3_4,
8_3_1,
8_2_8,
8_2_7,
8_2_4,
8_2_1,
8_1_7,
8_1_4,
8_1_1,
8_0_8,
8_0_4,
8_0_1,
7_9_8,
7_9_5,
7_9_1,
7_8_8,
7_8_5,
7_8_4,
7_8_0,
7_7_7,
7_7_4,
7_7_0,
7_6_6,
7_6_3,
7_6_0,
7_5_6,
7_5_2,
7_4_9,
7_4_6,
7_4_2,
7_4_1,
7_3_7,
7_3_3,
7_3_0,
7_2_6,
7_2_2,
7_1_8,
7_1_4,
7_1_0,
7_0_7,
7_0_3,
6_9_9,
6_9_8,
6_9_4,
6_9_0,
6_8_5,
6_8_1,
6_7_7,
6_7_3,
6_6_9,
6_6_4,
6_6_0,
6_5_6,
6_5_5,
6_5_0,
6_4_6,
6_4_1,
6_3_6,
6_3_2,
6_2_7,
6_2_2,
6_1_8,
6_1_3,
6_1_2,
6_0_7,
6_0_2,
5_9_6,
5_9_1,
5_8_6,
5_8_0,
5_7_5,
5_7_0,
5_6_9,
5_6_3,
5_5_7,
5_5_1,
5_4_5,
5_3_9,
5_3_3,
5_2_7,
5_2_6,
5_1_9,
5_1_2,
5_0_5,
4_9_8,
4_9_1,
4_8_4,
4_8_3,
4_7_4,
4_6_6,
4_5_7,
4_4_9,
4_4_0,
4_3_9,
4_2_8,
4_1_8,
4_0_7,
3_9_6,
3_9_5,
3_8_1,
3_6_6,
3_5_2,
3_5_1,
3_3_0,
3_0_8,
3_0_7,
2_8_6,
2_6_4,
2_6_3,
2_4_2,
2_2_0,
2_1_9,
1_7_6,
1_7_5,
1_3_2,
1_3_1,
8_8,
4_4,
0,
]
lowerCAmelCase = [
9_9_9,
9_9_1,
9_8_2,
9_7_4,
9_6_6,
9_5_8,
9_5_0,
9_4_1,
9_3_3,
9_2_5,
9_1_6,
9_0_8,
9_0_0,
8_9_9,
8_7_4,
8_5_0,
8_2_5,
8_0_0,
7_9_9,
7_0_0,
6_0_0,
5_0_0,
4_0_0,
3_0_0,
2_0_0,
1_0_0,
0,
]
lowerCAmelCase = [
9_9_9,
9_9_2,
9_8_5,
9_7_8,
9_7_1,
9_6_4,
9_5_7,
9_4_9,
9_4_2,
9_3_5,
9_2_8,
9_2_1,
9_1_4,
9_0_7,
9_0_0,
8_9_9,
8_7_9,
8_5_9,
8_4_0,
8_2_0,
8_0_0,
7_9_9,
7_6_6,
7_3_3,
7_0_0,
6_9_9,
6_5_0,
6_0_0,
5_9_9,
5_0_0,
4_9_9,
4_0_0,
3_9_9,
3_0_0,
2_9_9,
2_0_0,
1_9_9,
1_0_0,
9_9,
0,
]
lowerCAmelCase = [
9_9_9,
9_9_6,
9_9_2,
9_8_9,
9_8_5,
9_8_2,
9_7_9,
9_7_5,
9_7_2,
9_6_8,
9_6_5,
9_6_1,
9_5_8,
9_5_5,
9_5_1,
9_4_8,
9_4_4,
9_4_1,
9_3_8,
9_3_4,
9_3_1,
9_2_7,
9_2_4,
9_2_0,
9_1_7,
9_1_4,
9_1_0,
9_0_7,
9_0_3,
9_0_0,
8_9_9,
8_9_1,
8_8_4,
8_7_6,
8_6_9,
8_6_1,
8_5_3,
8_4_6,
8_3_8,
8_3_0,
8_2_3,
8_1_5,
8_0_8,
8_0_0,
7_9_9,
7_8_8,
7_7_7,
7_6_6,
7_5_5,
7_4_4,
7_3_3,
7_2_2,
7_1_1,
7_0_0,
6_9_9,
6_8_8,
6_7_7,
6_6_6,
6_5_5,
6_4_4,
6_3_3,
6_2_2,
6_1_1,
6_0_0,
5_9_9,
5_8_5,
5_7_1,
5_5_7,
5_4_2,
5_2_8,
5_1_4,
5_0_0,
4_9_9,
4_8_5,
4_7_1,
4_5_7,
4_4_2,
4_2_8,
4_1_4,
4_0_0,
3_9_9,
3_7_9,
3_5_9,
3_4_0,
3_2_0,
3_0_0,
2_9_9,
2_7_9,
2_5_9,
2_4_0,
2_2_0,
2_0_0,
1_9_9,
1_6_6,
1_3_3,
1_0_0,
9_9,
6_6,
3_3,
0,
]
| 304
| 0
|
"""simple docstring"""
from abc import ABC, abstractmethod
from argparse import ArgumentParser
class A ( ABC ):
    @staticmethod
    @abstractmethod
    def register_subcommand(parser ):
        raise NotImplementedError()
    @abstractmethod
    def run(self ):
        raise NotImplementedError()
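# Subclasses implement both hooks: register_subcommand() attaches the command's
# argparse sub-parser and run() executes it; the CLI entry point then calls the
# selected command's run() (a sketch of the usual pattern, not shown here).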
| 367
|
from __future__ import annotations
import numpy as np
def relu( vector ) -> np.ndarray:
    '''simple docstring'''
    return np.maximum(0 , vector )
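# ReLU is the element-wise map f(x) = max(0, x); np.maximum(0, vector) applies it
# to every entry of the input array in one vectorized call.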
if __name__ == "__main__":
print(np.array(relu([-1, 0, 5]))) # --> [0, 0, 5]
| 304
| 0
|
from __future__ import annotations
import os
import tempfile
import unittest
from transformers import ConvBertConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFConvBertForMaskedLM,
TFConvBertForMultipleChoice,
TFConvBertForQuestionAnswering,
TFConvBertForSequenceClassification,
TFConvBertForTokenClassification,
TFConvBertModel,
)
class TFConvBertModelTester :
    def __init__(self , parent , batch_size=1_3 , seq_length=7 , is_training=True , use_input_mask=True , use_token_type_ids=True , use_labels=True , vocab_size=9_9 , hidden_size=3_2 , num_hidden_layers=2 , num_attention_heads=4 , intermediate_size=3_7 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=5_1_2 , type_vocab_size=1_6 , type_sequence_label_size=2 , initializer_range=0.02 , num_labels=3 , num_choices=4 , scope=None , ):
        self.parent = parent
        self.batch_size = 1_3
        self.seq_length = 7
        self.is_training = True
        self.use_input_mask = True
        self.use_token_type_ids = True
        self.use_labels = True
        self.vocab_size = 9_9
        self.hidden_size = 3_8_4
        self.num_hidden_layers = 2
        self.num_attention_heads = 4
        self.intermediate_size = 3_7
        self.hidden_act = 'gelu'
        self.hidden_dropout_prob = 0.1
        self.attention_probs_dropout_prob = 0.1
        self.max_position_embeddings = 5_1_2
        self.type_vocab_size = 1_6
        self.type_sequence_label_size = 2
        self.initializer_range = 0.02
        self.num_labels = 3
        self.num_choices = 4
        self.embedding_size = 1_2_8
        self.head_ratio = 2
        self.conv_kernel_size = 9
        self.num_groups = 1
        self.scope = None
    def prepare_config_and_inputs(self ):
        input_ids = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length] )
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size] , self.type_sequence_label_size )
            token_labels = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
            choice_labels = ids_tensor([self.batch_size] , self.num_choices )
        config = ConvBertConfig(
            vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , return_dict=True , )
        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    def create_and_check_model(self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        model = TFConvBertModel(config=config )
        inputs = {'input_ids': input_ids, 'attention_mask': input_mask, 'token_type_ids': token_type_ids}
        inputs_list = [input_ids, input_mask]
        result = model(inputs )
        result = model(inputs_list )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
    def create_and_check_for_masked_lm(self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        model = TFConvBertForMaskedLM(config=config )
        inputs = {
            'input_ids': input_ids,
            'attention_mask': input_mask,
            'token_type_ids': token_type_ids,
        }
        result = model(inputs )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
    def create_and_check_for_sequence_classification(self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        config.num_labels = self.num_labels
        model = TFConvBertForSequenceClassification(config=config )
        inputs = {
            'input_ids': input_ids,
            'attention_mask': input_mask,
            'token_type_ids': token_type_ids,
        }
        result = model(inputs )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
    def create_and_check_for_multiple_choice(self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        config.num_choices = self.num_choices
        model = TFConvBertForMultipleChoice(config=config )
        multiple_choice_inputs_ids = tf.tile(tf.expand_dims(input_ids , 1 ) , (1, self.num_choices, 1) )
        multiple_choice_input_mask = tf.tile(tf.expand_dims(input_mask , 1 ) , (1, self.num_choices, 1) )
        multiple_choice_token_type_ids = tf.tile(tf.expand_dims(token_type_ids , 1 ) , (1, self.num_choices, 1) )
        inputs = {
            'input_ids': multiple_choice_inputs_ids,
            'attention_mask': multiple_choice_input_mask,
            'token_type_ids': multiple_choice_token_type_ids,
        }
        result = model(inputs )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
    def create_and_check_for_token_classification(self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        config.num_labels = self.num_labels
        model = TFConvBertForTokenClassification(config=config )
        inputs = {
            'input_ids': input_ids,
            'attention_mask': input_mask,
            'token_type_ids': token_type_ids,
        }
        result = model(inputs )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
    def create_and_check_for_question_answering(self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        model = TFConvBertForQuestionAnswering(config=config )
        inputs = {
            'input_ids': input_ids,
            'attention_mask': input_mask,
            'token_type_ids': token_type_ids,
        }
        result = model(inputs )
        self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
        self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
    def prepare_config_and_inputs_for_common(self ):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        )= config_and_inputs
        inputs_dict = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'attention_mask': input_mask}
        return config, inputs_dict
@require_tf
class TFConvBertModelTest ( TFModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    all_model_classes =(
        (
            TFConvBertModel,
            TFConvBertForMaskedLM,
            TFConvBertForQuestionAnswering,
            TFConvBertForSequenceClassification,
            TFConvBertForTokenClassification,
            TFConvBertForMultipleChoice,
        )
        if is_tf_available()
        else ()
    )
    pipeline_model_mapping =(
        {
            """feature-extraction""": TFConvBertModel,
            """fill-mask""": TFConvBertForMaskedLM,
            """question-answering""": TFConvBertForQuestionAnswering,
            """text-classification""": TFConvBertForSequenceClassification,
            """token-classification""": TFConvBertForTokenClassification,
            """zero-shot""": TFConvBertForSequenceClassification,
        }
        if is_tf_available()
        else {}
    )
    test_pruning =False
    test_head_masking =False
    test_onnx =False
    def setUp(self ):
        self.model_tester = TFConvBertModelTester(self )
        self.config_tester = ConfigTester(self , config_class=ConvBertConfig , hidden_size=3_7 )
    def test_config(self ):
        self.config_tester.run_common_tests()
    def test_model(self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs )
    def test_for_masked_lm(self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs )
    def test_for_multiple_choice(self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_multiple_choice(*config_and_inputs )
    def test_for_question_answering(self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs )
    def test_for_sequence_classification(self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs )
    def test_for_token_classification(self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs )
    @slow
    def test_saved_model_creation_extended(self ):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.output_hidden_states = True
        config.output_attentions = True
        if hasattr(config , 'use_cache' ):
            config.use_cache = True
        encoder_seq_length = getattr(self.model_tester , 'encoder_seq_length' , self.model_tester.seq_length )
        encoder_key_length = getattr(self.model_tester , 'key_length' , encoder_seq_length )
        for model_class in self.all_model_classes:
            class_inputs_dict = self._prepare_for_class(inputs_dict , model_class )
            model = model_class(config )
            num_out = len(model(class_inputs_dict ) )
            with tempfile.TemporaryDirectory() as tmpdirname:
                model.save_pretrained(tmpdirname , saved_model=True )
                saved_model_dir = os.path.join(tmpdirname , 'saved_model' , '1' )
                model = tf.keras.models.load_model(saved_model_dir )
                outputs = model(class_inputs_dict )
                if self.is_encoder_decoder:
                    output_hidden_states = outputs['encoder_hidden_states']
                    output_attentions = outputs['encoder_attentions']
                else:
                    output_hidden_states = outputs['hidden_states']
                    output_attentions = outputs['attentions']
                self.assertEqual(len(outputs ) , num_out )
                expected_num_layers = getattr(
                    self.model_tester , 'expected_num_hidden_layers' , self.model_tester.num_hidden_layers + 1 )
                self.assertEqual(len(output_hidden_states ) , expected_num_layers )
                self.assertListEqual(
                    list(output_hidden_states[0].shape[-2:] ) , [self.model_tester.seq_length, self.model_tester.hidden_size] , )
                self.assertEqual(len(output_attentions ) , self.model_tester.num_hidden_layers )
                self.assertListEqual(
                    list(output_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads / 2, encoder_seq_length, encoder_key_length] , )
    @slow
    def test_model_from_pretrained(self ):
        model = TFConvBertModel.from_pretrained('YituTech/conv-bert-base' )
        self.assertIsNotNone(model )
    def test_attention_outputs(self ):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.return_dict = True
        decoder_seq_length = getattr(self.model_tester , 'decoder_seq_length' , self.model_tester.seq_length )
        encoder_seq_length = getattr(self.model_tester , 'encoder_seq_length' , self.model_tester.seq_length )
        decoder_key_length = getattr(self.model_tester , 'key_length' , decoder_seq_length )
        encoder_key_length = getattr(self.model_tester , 'key_length' , encoder_seq_length )
        def check_decoder_attentions_output(outputs ):
            out_len = len(outputs )
            self.assertEqual(out_len % 2 , 0 )
            decoder_attentions = outputs.decoder_attentions
            self.assertEqual(len(decoder_attentions ) , self.model_tester.num_hidden_layers )
            self.assertListEqual(
                list(decoder_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads / 2, decoder_seq_length, decoder_key_length] , )
        def check_encoder_attentions_output(outputs ):
            attentions = [
                t.numpy() for t in (outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions)
            ]
            self.assertEqual(len(attentions ) , self.model_tester.num_hidden_layers )
            self.assertListEqual(
                list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads / 2, encoder_seq_length, encoder_key_length] , )
        for model_class in self.all_model_classes:
            inputs_dict['output_attentions'] = True
            config.output_hidden_states = False
            model = model_class(config )
            outputs = model(self._prepare_for_class(inputs_dict , model_class ) )
            out_len = len(outputs )
            self.assertEqual(config.output_hidden_states , False )
            check_encoder_attentions_output(outputs )
            if self.is_encoder_decoder:
                model = model_class(config )
                outputs = model(self._prepare_for_class(inputs_dict , model_class ) )
                self.assertEqual(config.output_hidden_states , False )
                check_decoder_attentions_output(outputs )
            # Check that output attentions can also be changed via the config
            del inputs_dict["output_attentions"]
            config.output_attentions = True
            model = model_class(config )
            outputs = model(self._prepare_for_class(inputs_dict , model_class ) )
            self.assertEqual(config.output_hidden_states , False )
            check_encoder_attentions_output(outputs )
            # Check attention is always last and order is fine
            inputs_dict['output_attentions'] = True
            config.output_hidden_states = True
            model = model_class(config )
            outputs = model(self._prepare_for_class(inputs_dict , model_class ) )
            self.assertEqual(out_len + (2 if self.is_encoder_decoder else 1) , len(outputs ) )
            self.assertEqual(model.config.output_hidden_states , True )
            check_encoder_attentions_output(outputs )
@require_tf
class TFConvBertModelIntegrationTest ( unittest.TestCase ):
    @slow
    def test_inference_masked_lm(self ):
        model = TFConvBertModel.from_pretrained('YituTech/conv-bert-base' )
        input_ids = tf.constant([[0, 1, 2, 3, 4, 5]] )
        output = model(input_ids )[0]
        expected_shape = [1, 6, 7_6_8]
        self.assertEqual(output.shape , expected_shape )
        expected_slice = tf.constant(
            [
                [
                    [-0.03_47_54_93, -0.4_68_60_34, -0.30_63_88_32],
                    [0.22_63_72_48, -0.26_98_86_46, -0.7_42_34_24],
                    [0.10_32_48_68, -0.45_01_35_08, -0.58_28_07_84],
                ]
            ] )
        tf.debugging.assert_near(output[:, :3, :3] , expected_slice , atol=1E-4 )
| 368
|
def solution( power = 1_0_0_0 ) -> int:
    '''simple docstring'''
    num = 2**power
    string_num = str(num )
    list_num = list(string_num )
    sum_of_num = 0
    for i in list_num:
        sum_of_num += int(i )
    return sum_of_num
if __name__ == "__main__":
    power = int(input('''Enter the power of 2: ''').strip())
    print('''2 ^ ''', power, ''' = ''', 2**power)
    result = solution(power)
    print('''Sum of the digits is: ''', result)
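# Worked example (illustrative): 2**15 = 32768 and 3 + 2 + 7 + 6 + 8 = 26,
# so solution(15) returns 26.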
| 304
| 0
|
from __future__ import annotations
import csv
import requests
from bs4 import BeautifulSoup
def get_imdb_top_aaa_movies( url = "" ) -> dict[str, float]:
    '''simple docstring'''
    url = url or 'https://www.imdb.com/chart/top/?ref_=nv_mv_250'
    soup = BeautifulSoup(requests.get(url ).text , 'html.parser' )
    titles = soup.find_all('td' , attrs='titleColumn' )
    ratings = soup.find_all('td' , class_='ratingColumn imdbRating' )
    return {
        title.a.text: float(rating.strong.text )
        for title, rating in zip(titles , ratings )
    }
def write_movies( filename = "IMDb_Top_250_Movies.csv" ) -> None:
    '''simple docstring'''
    movies = get_imdb_top_aaa_movies()
    with open(filename , 'w' , newline='' ) as out_file:
        writer = csv.writer(out_file )
        writer.writerow(['Movie title', 'IMDb rating'] )
        for title, rating in movies.items():
            writer.writerow([title, rating] )
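# The CSV written by write_movies() has a header row followed by one
# "<title>,<rating>" row per film; exact contents depend on what the page
# serves at scrape time, so the values will vary.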
if __name__ == "__main__":
write_movies()
| 369
|
import logging
import os
from dataclasses import dataclass, field
from typing import Dict, Optional
import datasets
import numpy as np
import tensorflow as tf
from transformers import (
AutoConfig,
AutoTokenizer,
EvalPrediction,
HfArgumentParser,
PreTrainedTokenizer,
TFAutoModelForSequenceClassification,
TFTrainer,
TFTrainingArguments,
)
from transformers.utils import logging as hf_logging
hf_logging.set_verbosity_info()
hf_logging.enable_default_handler()
hf_logging.enable_explicit_format()
def get_tfds( train_file , eval_file , test_file , tokenizer , label_column_id , max_seq_length = None , ):
    '''simple docstring'''
    files = {}
    if train_file is not None:
        files[datasets.Split.TRAIN] = [train_file]
    if eval_file is not None:
        files[datasets.Split.VALIDATION] = [eval_file]
    if test_file is not None:
        files[datasets.Split.TEST] = [test_file]
    ds = datasets.load_dataset('csv' , data_files=files )
    features_name = list(ds[list(files.keys() )[0]].features.keys() )
    label_name = features_name.pop(label_column_id )
    label_list = list(set(ds[list(files.keys() )[0]][label_name] ) )
    label2id = {label: i for i, label in enumerate(label_list )}
    input_names = tokenizer.model_input_names
    transformed_ds = {}
    if len(features_name ) == 1:
        for k in files.keys():
            transformed_ds[k] = ds[k].map(
                lambda example : tokenizer.batch_encode_plus(
                    example[features_name[0]] , truncation=True , max_length=max_seq_length , padding='max_length' ) , batched=True , )
    elif len(features_name ) == 2:
        for k in files.keys():
            transformed_ds[k] = ds[k].map(
                lambda example : tokenizer.batch_encode_plus(
                    (example[features_name[0]], example[features_name[1]]) , truncation=True , max_length=max_seq_length , padding='max_length' , ) , batched=True , )
    def gen_train():
        for ex in transformed_ds[datasets.Split.TRAIN]:
            d = {k: v for k, v in ex.items() if k in input_names}
            label = label2id[ex[label_name]]
            yield (d, label)
    def gen_val():
        for ex in transformed_ds[datasets.Split.VALIDATION]:
            d = {k: v for k, v in ex.items() if k in input_names}
            label = label2id[ex[label_name]]
            yield (d, label)
    def gen_test():
        for ex in transformed_ds[datasets.Split.TEST]:
            d = {k: v for k, v in ex.items() if k in input_names}
            label = label2id[ex[label_name]]
            yield (d, label)
    train_ds = (
        tf.data.Dataset.from_generator(
            gen_train , ({k: tf.int32 for k in input_names}, tf.int64) , ({k: tf.TensorShape([None] ) for k in input_names}, tf.TensorShape([] )) , )
        if datasets.Split.TRAIN in transformed_ds
        else None
    )
    if train_ds is not None:
        train_ds = train_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.TRAIN] ) ) )
    val_ds = (
        tf.data.Dataset.from_generator(
            gen_val , ({k: tf.int32 for k in input_names}, tf.int64) , ({k: tf.TensorShape([None] ) for k in input_names}, tf.TensorShape([] )) , )
        if datasets.Split.VALIDATION in transformed_ds
        else None
    )
    if val_ds is not None:
        val_ds = val_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.VALIDATION] ) ) )
    test_ds = (
        tf.data.Dataset.from_generator(
            gen_test , ({k: tf.int32 for k in input_names}, tf.int64) , ({k: tf.TensorShape([None] ) for k in input_names}, tf.TensorShape([] )) , )
        if datasets.Split.TEST in transformed_ds
        else None
    )
    if test_ds is not None:
        test_ds = test_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.TEST] ) ) )
    return train_ds, val_ds, test_ds, label2id
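# Sketch of a typical call (file names here are illustrative):
#   train_ds, val_ds, test_ds, label2id = get_tfds(
#       train_file='train.csv', eval_file='dev.csv', test_file=None,
#       tokenizer=tokenizer, label_column_id=0, max_seq_length=128)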
logger = logging.getLogger(__name__)
@dataclass
class DataTrainingArguments :
    label_column_id : int =field(metadata={'''help''': '''Which column contains the label'''} )
    train_file : str =field(default=None , metadata={'''help''': '''The path of the training file'''} )
    dev_file : Optional[str] =field(default=None , metadata={'''help''': '''The path of the development file'''} )
    test_file : Optional[str] =field(default=None , metadata={'''help''': '''The path of the test file'''} )
    max_seq_length : int =field(
        default=128 , metadata={
            '''help''': (
                '''The maximum total input sequence length after tokenization. Sequences longer '''
                '''than this will be truncated, sequences shorter will be padded.'''
            )
        } , )
    overwrite_cache : bool =field(
        default=False , metadata={'''help''': '''Overwrite the cached training and evaluation sets'''} )
@dataclass
class ModelArguments :
    model_name_or_path : str =field(
        metadata={'''help''': '''Path to pretrained model or model identifier from huggingface.co/models'''} )
    config_name : Optional[str] =field(
        default=None , metadata={'''help''': '''Pretrained config name or path if not the same as model_name'''} )
    tokenizer_name : Optional[str] =field(
        default=None , metadata={'''help''': '''Pretrained tokenizer name or path if not the same as model_name'''} )
    use_fast : bool =field(default=False , metadata={'''help''': '''Set this flag to use fast tokenization.'''} )
    # If you want to tweak more attributes on your tokenizer, you should do it in a distinct script,
    # or just modify its tokenizer_config.json.
    cache_dir : Optional[str] =field(
        default=None , metadata={'''help''': '''Where do you want to store the pretrained models downloaded from huggingface.co'''} , )
def main():
    '''simple docstring'''
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TFTrainingArguments) )
    model_args, data_args, training_args = parser.parse_args_into_dataclasses()
    if (
        os.path.exists(training_args.output_dir )
        and os.listdir(training_args.output_dir )
        and training_args.do_train
        and not training_args.overwrite_output_dir
    ):
        raise ValueError(
            F'Output directory ({training_args.output_dir}) already exists and is not empty. Use'
            ' --overwrite_output_dir to overcome.' )
    # Setup logging
    logging.basicConfig(
        format='%(asctime)s - %(levelname)s - %(name)s - %(message)s' , datefmt='%m/%d/%Y %H:%M:%S' , level=logging.INFO , )
    logger.info(
        F'n_replicas: {training_args.n_replicas}, distributed training: {bool(training_args.n_replicas > 1 )}, '
        F'16-bits training: {training_args.fp16}' )
    logger.info(F'Training/evaluation parameters {training_args}' )
    # Load pretrained model and tokenizer
    #
    # Distributed training:
    # The .from_pretrained methods guarantee that only one local process can concurrently
    # download model & vocab.
    tokenizer = AutoTokenizer.from_pretrained(
        model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , )
    train_dataset, eval_dataset, test_ds, label2id = get_tfds(
        train_file=data_args.train_file , eval_file=data_args.dev_file , test_file=data_args.test_file , tokenizer=tokenizer , label_column_id=data_args.label_column_id , max_seq_length=data_args.max_seq_length , )
    config = AutoConfig.from_pretrained(
        model_args.config_name if model_args.config_name else model_args.model_name_or_path , num_labels=len(label2id ) , label2id=label2id , id2label={id: label for label, id in label2id.items()} , finetuning_task='text-classification' , cache_dir=model_args.cache_dir , )
    with training_args.strategy.scope():
        model = TFAutoModelForSequenceClassification.from_pretrained(
            model_args.model_name_or_path , from_pt=bool('.bin' in model_args.model_name_or_path ) , config=config , cache_dir=model_args.cache_dir , )
    def compute_metrics(p ) -> Dict:
        preds = np.argmax(p.predictions , axis=1 )
        return {"acc": (preds == p.label_ids).mean()}
    # Initialize our Trainer
    trainer = TFTrainer(
        model=model , args=training_args , train_dataset=train_dataset , eval_dataset=eval_dataset , compute_metrics=compute_metrics , )
    # Training
    if training_args.do_train:
        trainer.train()
        trainer.save_model()
        tokenizer.save_pretrained(training_args.output_dir )
    # Evaluation
    results = {}
    if training_args.do_eval:
        logger.info('*** Evaluate ***' )
        result = trainer.evaluate()
        output_eval_file = os.path.join(training_args.output_dir , 'eval_results.txt' )
        with open(output_eval_file , 'w' ) as writer:
            logger.info('***** Eval results *****' )
            for key, value in result.items():
                logger.info(F' {key} = {value}' )
                writer.write(F'{key} = {value}\n' )
            results.update(result )
    return results
if __name__ == "__main__":
main()
| 304
| 0
|
import torch
from diffusers import UnCLIPScheduler
from .test_schedulers import SchedulerCommonTest
class UnCLIPSchedulerTest ( SchedulerCommonTest ):
    scheduler_classes =(UnCLIPScheduler,)
    def get_scheduler_config(self , **lowerCAmelCase ):
        config = {
            'num_train_timesteps': 1_0_0_0,
            'variance_type': 'fixed_small_log',
            'clip_sample': True,
            'clip_sample_range': 1.0,
            'prediction_type': 'epsilon',
        }
        config.update(**lowerCAmelCase )
        return config
    def test_timesteps(self ):
        for timesteps in [1, 5, 1_0_0, 1_0_0_0]:
            self.check_over_configs(num_train_timesteps=timesteps )
    def test_variance_type(self ):
        for variance in ["fixed_small_log", "learned_range"]:
            self.check_over_configs(variance_type=variance )
    def test_clip_sample(self ):
        for clip_sample in [True, False]:
            self.check_over_configs(clip_sample=clip_sample )
    def test_clip_sample_range(self ):
        for clip_sample_range in [1, 5, 1_0, 2_0]:
            self.check_over_configs(clip_sample_range=clip_sample_range )
    def test_prediction_type(self ):
        for prediction_type in ["epsilon", "sample"]:
            self.check_over_configs(prediction_type=prediction_type )
    def test_time_indices(self ):
        for time_step in [0, 5_0_0, 9_9_9]:
            for prev_timestep in [None, 5, 1_0_0, 2_5_0, 5_0_0, 7_5_0]:
                if prev_timestep is not None and prev_timestep >= time_step:
                    continue
                self.check_over_forward(time_step=time_step , prev_timestep=prev_timestep )
    def test_variance_fixed_small_log(self ):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(variance_type='fixed_small_log' )
        scheduler = scheduler_class(**scheduler_config )
        assert torch.sum(torch.abs(scheduler._get_variance(0 ) - 1.0000E-10 ) ) < 1E-5
        assert torch.sum(torch.abs(scheduler._get_variance(4_8_7 ) - 0.0_54_96_25 ) ) < 1E-5
        assert torch.sum(torch.abs(scheduler._get_variance(9_9_9 ) - 0.9_99_49_87 ) ) < 1E-5
    def test_variance_learned_range(self ):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(variance_type='learned_range' )
        scheduler = scheduler_class(**scheduler_config )
        predicted_variance = 0.5
        assert scheduler._get_variance(1 , predicted_variance=predicted_variance ) - -10.1_71_27_90 < 1E-5
        assert scheduler._get_variance(4_8_7 , predicted_variance=predicted_variance ) - -5.7_99_80_52 < 1E-5
        assert scheduler._get_variance(9_9_9 , predicted_variance=predicted_variance ) - -0.0_01_00_11 < 1E-5
    def test_full_loop(self ):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config )
        timesteps = scheduler.timesteps
        model = self.dummy_model()
        sample = self.dummy_sample_deter
        generator = torch.manual_seed(0 )
        for i, t in enumerate(timesteps ):
            # 1. predict noise residual
            residual = model(sample , t )
            # 2. predict previous mean of sample x_t-1
            pred_prev_sample = scheduler.step(residual , t , sample , generator=generator ).prev_sample
            sample = pred_prev_sample
        result_sum = torch.sum(torch.abs(sample ) )
        result_mean = torch.mean(torch.abs(sample ) )
        assert abs(result_sum.item() - 2_52.2_68_24_95 ) < 1E-2
        assert abs(result_mean.item() - 0.3_28_47_43 ) < 1E-3
    def test_full_loop_skip_timesteps(self ):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config )
        scheduler.set_timesteps(2_5 )
        timesteps = scheduler.timesteps
        model = self.dummy_model()
        sample = self.dummy_sample_deter
        generator = torch.manual_seed(0 )
        for i, t in enumerate(timesteps ):
            # 1. predict noise residual
            residual = model(sample , t )
            if i + 1 == timesteps.shape[0]:
                prev_timestep = None
            else:
                prev_timestep = timesteps[i + 1]
            # 2. predict previous mean of sample x_t-1
            pred_prev_sample = scheduler.step(
                residual , t , sample , prev_timestep=prev_timestep , generator=generator ).prev_sample
            sample = pred_prev_sample
        result_sum = torch.sum(torch.abs(sample ) )
        result_mean = torch.mean(torch.abs(sample ) )
        assert abs(result_sum.item() - 2_58.2_04_49_83 ) < 1E-2
        assert abs(result_mean.item() - 0.3_36_20_38 ) < 1E-3
    def test_trained_betas(self ):
        pass
    def test_add_noise_device(self ):
        pass
| 370
|
import inspect
import unittest
from math import floor
from transformers import CvtConfig
from transformers.file_utils import cached_property, is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import CvtForImageClassification, CvtModel
from transformers.models.cvt.modeling_cvt import CVT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class CvtConfigTester ( ConfigTester ):
    def create_and_test_config_common_properties(self ):
        config = self.config_class(**self.inputs_dict )
        self.parent.assertTrue(hasattr(config , 'embed_dim' ) )
        self.parent.assertTrue(hasattr(config , 'num_heads' ) )
class CvtModelTester :
    def __init__(self , parent , batch_size=1_3 , image_size=6_4 , num_channels=3 , embed_dim=[1_6, 4_8, 9_6] , num_heads=[1, 3, 6] , depth=[1, 2, 1_0] , patch_sizes=[7, 3, 3] , patch_stride=[4, 2, 2] , patch_padding=[2, 1, 1] , stride_kv=[2, 2, 2] , cls_token=[False, False, True] , attention_drop_rate=[0.0, 0.0, 0.0] , initializer_range=0.02 , layer_norm_eps=1E-12 , is_training=True , use_labels=True , num_labels=2 , ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_sizes = patch_sizes
        self.patch_stride = patch_stride
        self.patch_padding = patch_padding
        self.is_training = is_training
        self.use_labels = use_labels
        self.num_labels = num_labels
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.num_heads = num_heads
        self.stride_kv = stride_kv
        self.depth = depth
        self.cls_token = cls_token
        self.attention_drop_rate = attention_drop_rate
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
    def prepare_config_and_inputs(self ):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size] , self.num_labels )
        config = self.get_config()
        return config, pixel_values, labels
    def get_config(self ):
        return CvtConfig(
            image_size=self.image_size , num_labels=self.num_labels , num_channels=self.num_channels , embed_dim=self.embed_dim , num_heads=self.num_heads , patch_sizes=self.patch_sizes , patch_padding=self.patch_padding , patch_stride=self.patch_stride , stride_kv=self.stride_kv , depth=self.depth , cls_token=self.cls_token , attention_drop_rate=self.attention_drop_rate , initializer_range=self.initializer_range , )
    def create_and_check_model(self , config , pixel_values , labels ):
        model = CvtModel(config=config )
        model.to(torch_device )
        model.eval()
        result = model(pixel_values )
        image_size = (self.image_size, self.image_size)
        height, width = image_size[0], image_size[1]
        for i in range(len(self.depth ) ):
            height = floor(((height + 2 * self.patch_padding[i] - self.patch_sizes[i]) / self.patch_stride[i]) + 1 )
            width = floor(((width + 2 * self.patch_padding[i] - self.patch_sizes[i]) / self.patch_stride[i]) + 1 )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.embed_dim[-1], height, width) )
    def create_and_check_for_image_classification(self , config , pixel_values , labels ):
        config.num_labels = self.num_labels
        model = CvtForImageClassification(config )
        model.to(torch_device )
        model.eval()
        result = model(pixel_values , labels=labels )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
    def prepare_config_and_inputs_for_common(self ):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {'pixel_values': pixel_values}
        return config, inputs_dict
@require_torch
class CvtModelTest ( ModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    all_model_classes =(CvtModel, CvtForImageClassification) if is_torch_available() else ()
    pipeline_model_mapping =(
        {'''feature-extraction''': CvtModel, '''image-classification''': CvtForImageClassification}
        if is_torch_available()
        else {}
    )
    test_pruning =False
    test_torchscript =False
    test_resize_embeddings =False
    test_head_masking =False
    has_attentions =False
    def setUp(self ):
        self.model_tester = CvtModelTester(self )
        self.config_tester = ConfigTester(self , config_class=CvtConfig , has_text_modality=False , hidden_size=3_7 )
    def test_config(self ):
        self.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()
    def create_and_test_config_common_properties(self ):
        return
    @unittest.skip(reason='Cvt does not output attentions' )
    def test_attention_outputs(self ):
        pass
    @unittest.skip(reason='Cvt does not use inputs_embeds' )
    def test_inputs_embeds(self ):
        pass
    @unittest.skip(reason='Cvt does not support input and output embeddings' )
    def test_model_common_attributes(self ):
        pass
    def test_forward_signature(self ):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config )
            signature = inspect.signature(model.forward )
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ['pixel_values']
            self.assertListEqual(arg_names[:1] , expected_arg_names )
    def test_model(self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs )
    def test_hidden_states_output(self ):
        def check_hidden_states_output(inputs_dict , config , model_class ):
            model = model_class(config )
            model.to(torch_device )
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict , model_class ) )
            hidden_states = outputs.hidden_states
            expected_num_layers = len(self.model_tester.depth )
            self.assertEqual(len(hidden_states ) , expected_num_layers )
            # verify the first hidden states (first block)
            self.assertListEqual(
                list(hidden_states[0].shape[-3:] ) , [
                    self.model_tester.embed_dim[0],
                    self.model_tester.image_size // 4,
                    self.model_tester.image_size // 4,
                ] , )
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            inputs_dict['output_hidden_states'] = True
            check_hidden_states_output(inputs_dict , config , model_class )
            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            check_hidden_states_output(inputs_dict , config , model_class )
    def test_for_image_classification(self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs )
    @unittest.skip('Will be fixed soon by reducing the size of the model used for common tests.' )
    def test_model_is_small(self ):
        pass
    @slow
    def test_model_from_pretrained(self ):
        for model_name in CVT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = CvtModel.from_pretrained(model_name )
            self.assertIsNotNone(model )
def prepare_img( ):
    '''simple docstring'''
    image = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
    return image
@require_torch
@require_vision
class CvtModelIntegrationTest ( unittest.TestCase ):
    @cached_property
    def default_image_processor(self ):
        return AutoImageProcessor.from_pretrained(CVT_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
    @slow
    def test_inference_image_classification_head(self ):
        model = CvtForImageClassification.from_pretrained(CVT_PRETRAINED_MODEL_ARCHIVE_LIST[0] ).to(torch_device )
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image , return_tensors='pt' ).to(torch_device )
        # forward pass
        with torch.no_grad():
            outputs = model(**inputs )
        # verify the logits
        expected_shape = torch.Size((1, 1_0_0_0) )
        self.assertEqual(outputs.logits.shape , expected_shape )
        expected_slice = torch.tensor([0.92_85, 0.90_15, -0.31_50] ).to(torch_device )
        self.assertTrue(torch.allclose(outputs.logits[0, :3] , expected_slice , atol=1E-4 ) )
| 304
| 0
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCAmelCase = logging.get_logger(__name__)
lowerCAmelCase = {
'''s-JoL/Open-Llama-V1''': '''https://huggingface.co/s-JoL/Open-Llama-V1/blob/main/config.json''',
}
class OpenLlamaConfig ( PretrainedConfig ):
    """simple docstring"""
    model_type ="""open-llama"""
    def __init__(self , vocab_size=1_0_0_0_0_0 , hidden_size=4_0_9_6 , intermediate_size=1_1_0_0_8 , num_hidden_layers=3_2 , num_attention_heads=3_2 , hidden_act="silu" , max_position_embeddings=2_0_4_8 , initializer_range=0.02 , rms_norm_eps=1E-6 , use_cache=True , pad_token_id=0 , bos_token_id=1 , eos_token_id=2 , tie_word_embeddings=False , use_memorry_efficient_attention=True , hidden_dropout_prob=0.1 , attention_dropout_prob=0.1 , use_stable_embedding=True , shared_input_output_embedding=True , rope_scaling=None , **kwargs , ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.initializer_range = initializer_range
        self.rms_norm_eps = rms_norm_eps
        self.use_cache = use_cache
        self.use_memorry_efficient_attention = kwargs.pop(
            'use_memorry_efficient_attention' , use_memorry_efficient_attention )
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_dropout_prob = attention_dropout_prob
        self.use_stable_embedding = use_stable_embedding
        self.shared_input_output_embedding = shared_input_output_embedding
        self.rope_scaling = rope_scaling
        self._rope_scaling_validation()
        super().__init__(
            pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id , tie_word_embeddings=tie_word_embeddings , **kwargs , )
    def _rope_scaling_validation(self ):
        if self.rope_scaling is None:
            return
        if not isinstance(self.rope_scaling , dict ) or len(self.rope_scaling ) != 2:
            raise ValueError(
                '`rope_scaling` must be a dictionary with with two fields, `name` and `factor`, '
                f'got {self.rope_scaling}' )
        rope_scaling_type = self.rope_scaling.get('type' , None )
        rope_scaling_factor = self.rope_scaling.get('factor' , None )
        if rope_scaling_type is None or rope_scaling_type not in ["linear", "dynamic"]:
            raise ValueError(
                f'`rope_scaling`\'s name field must be one of [\'linear\', \'dynamic\'], got {rope_scaling_type}' )
        if rope_scaling_factor is None or not isinstance(rope_scaling_factor , float ) or rope_scaling_factor <= 1.0:
            raise ValueError(f'`rope_scaling`\'s factor field must be an float > 1, got {rope_scaling_factor}' )
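# Illustrative value accepted by the validation above (hypothetical usage):
#   OpenLlamaConfig(rope_scaling={'type': 'linear', 'factor': 2.0})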
| 371
|
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {'''configuration_mra''': ['''MRA_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''MraConfig''']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_mra'''] = [
'''MRA_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''MraForMaskedLM''',
'''MraForMultipleChoice''',
'''MraForQuestionAnswering''',
'''MraForSequenceClassification''',
'''MraForTokenClassification''',
'''MraLayer''',
'''MraModel''',
'''MraPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_mra import MRA_PRETRAINED_CONFIG_ARCHIVE_MAP, MraConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mra import (
MRA_PRETRAINED_MODEL_ARCHIVE_LIST,
MraForMaskedLM,
MraForMultipleChoice,
MraForQuestionAnswering,
MraForSequenceClassification,
MraForTokenClassification,
MraLayer,
MraModel,
MraPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure)
| 304
| 0
|
def find_minimum_change( denominations , value ) -> list:
    '''simple docstring'''
    total_value = int(value )
    # Initialize Result
    answer = []
    # Traverse through all denomination
    for denomination in reversed(denominations ):
        # Find denominations
        while int(total_value ) >= int(denomination ):
            total_value -= int(denomination )
            answer.append(denomination )  # Append the "answers" array
    return answer
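# Greedy example (illustrative): find_minimum_change([1, 2, 5, 10, 20, 50, 100, 500, 2000], '987')
# yields [500, 100, 100, 100, 100, 50, 20, 10, 5, 2]. Note the greedy strategy is only
# guaranteed optimal for canonical coin systems such as this one.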
# Driver Code
if __name__ == "__main__":
lowerCAmelCase = []
lowerCAmelCase = '''0'''
if (
input('''Do you want to enter your denominations ? (yY/n): ''').strip().lower()
== "y"
):
lowerCAmelCase = int(input('''Enter the number of denominations you want to add: ''').strip())
for i in range(0, n):
denominations.append(int(input(F'Denomination {i}: ').strip()))
lowerCAmelCase = input('''Enter the change you want to make in Indian Currency: ''').strip()
else:
# All denominations of Indian Currency if user does not enter
lowerCAmelCase = [1, 2, 5, 1_0, 2_0, 5_0, 1_0_0, 5_0_0, 2_0_0_0]
lowerCAmelCase = input('''Enter the change you want to make: ''').strip()
if int(value) == 0 or int(value) < 0:
print('''The total value cannot be zero or negative.''')
else:
print(F'Following is minimal change for {value}: ')
lowerCAmelCase = find_minimum_change(denominations, value)
# Print result
for i in range(len(answer)):
print(answer[i], end=''' ''')
| 350
|
import os
import re
import shutil
from argparse import ArgumentParser, Namespace
from datasets.commands import BaseDatasetsCLICommand
from datasets.utils.logging import get_logger
HIGHLIGHT_MESSAGE_PRE = '''<<<<<<< This should probably be modified because it mentions: '''
HIGHLIGHT_MESSAGE_POST = '''=======
>>>>>>>
'''
TO_HIGHLIGHT = [
'''TextEncoderConfig''',
'''ByteTextEncoder''',
'''SubwordTextEncoder''',
'''encoder_config''',
'''maybe_build_from_corpus''',
'''manual_dir''',
]
TO_CONVERT = [
# (pattern, replacement)
# Order is important here for some replacements
(R'''tfds\.core''', R'''datasets'''),
(R'''tf\.io\.gfile\.GFile''', R'''open'''),
(R'''tf\.([\w\d]+)''', R'''datasets.Value(\'\1\')'''),
(R'''tfds\.features\.Text\(\)''', R'''datasets.Value(\'string\')'''),
(R'''tfds\.features\.Text\(''', R'''datasets.Value(\'string\'),'''),
(R'''features\s*=\s*tfds.features.FeaturesDict\(''', R'''features=datasets.Features('''),
(R'''tfds\.features\.FeaturesDict\(''', R'''dict('''),
(R'''The TensorFlow Datasets Authors''', R'''The TensorFlow Datasets Authors and the HuggingFace Datasets Authors'''),
(R'''tfds\.''', R'''datasets.'''),
(R'''dl_manager\.manual_dir''', R'''self.config.data_dir'''),
(R'''self\.builder_config''', R'''self.config'''),
]
def convert_command_factory( args ):
    '''simple docstring'''
    return ConvertCommand(args.tfds_path , args.datasets_directory )
class ConvertCommand ( BaseDatasetsCLICommand ):
    @staticmethod
    def register_subcommand(parser ):
        train_parser = parser.add_parser(
            'convert' , help='Convert a TensorFlow Datasets dataset to a HuggingFace Datasets dataset.' , )
        train_parser.add_argument(
            '--tfds_path' , type=str , required=True , help='Path to a TensorFlow Datasets folder to convert or a single tfds file to convert.' , )
        train_parser.add_argument(
            '--datasets_directory' , type=str , required=True , help='Path to the HuggingFace Datasets folder.' )
        train_parser.set_defaults(func=convert_command_factory )
    def __init__(self , tfds_path , datasets_directory , *args ):
        self._logger = get_logger('datasets-cli/converting' )
        self._tfds_path = tfds_path
        self._datasets_directory = datasets_directory
    def run(self ):
        if os.path.isdir(self._tfds_path ):
            abs_tfds_path = os.path.abspath(self._tfds_path )
        elif os.path.isfile(self._tfds_path ):
            abs_tfds_path = os.path.dirname(self._tfds_path )
        else:
            raise ValueError('--tfds_path is neither a directory nor a file. Please check path.' )
        abs_datasets_path = os.path.abspath(self._datasets_directory )
        self._logger.info(f'Converting datasets from {abs_tfds_path} to {abs_datasets_path}' )
        utils_files = []
        with_manual_update = []
        imports_to_builder_map = {}
        if os.path.isdir(self._tfds_path ):
            file_names = os.listdir(abs_tfds_path )
        else:
            file_names = [os.path.basename(self._tfds_path )]
        for f_name in file_names:
            self._logger.info(f'Looking at file {f_name}' )
            input_file = os.path.join(abs_tfds_path , f_name )
            output_file = os.path.join(abs_datasets_path , f_name )
            if not os.path.isfile(input_file ) or "__init__" in f_name or "_test" in f_name or ".py" not in f_name:
                self._logger.info('Skipping file' )
                continue
            with open(input_file , encoding='utf-8' ) as f:
                lines = f.readlines()
            out_lines = []
            is_builder = False
            needs_manual_update = False
            tfds_imports = []
            for line in lines:
                out_line = line
                # Convert imports
                if "import tensorflow.compat.v2 as tf" in out_line:
                    continue
                elif "@tfds.core" in out_line:
                    continue
                elif "builder=self" in out_line:
                    continue
                elif "import tensorflow_datasets.public_api as tfds" in out_line:
                    out_line = 'import datasets\n'
                elif "import tensorflow" in out_line:
                    # order is important here
                    out_line = ''
                    continue
                elif "from absl import logging" in out_line:
                    out_line = 'from datasets import logging\n'
                elif "getLogger" in out_line:
                    out_line = out_line.replace('getLogger' , 'get_logger' )
                elif any(expression in out_line for expression in TO_HIGHLIGHT ):
                    needs_manual_update = True
                    to_remove = list(filter(lambda e : e in out_line , TO_HIGHLIGHT ) )
                    out_lines.append(HIGHLIGHT_MESSAGE_PRE + str(to_remove ) + '\n' )
                    out_lines.append(out_line )
                    out_lines.append(HIGHLIGHT_MESSAGE_POST )
                    continue
                else:
                    for pattern, replacement in TO_CONVERT:
                        out_line = re.sub(pattern , replacement , out_line )
                # Take care of saving utilities (to later move them together with main script)
                if "tensorflow_datasets" in out_line:
                    match = re.match(r'from\stensorflow_datasets.*import\s([^\.\r\n]+)' , out_line )
                    tfds_imports.extend(imp.strip() for imp in match.group(1 ).split(',' ) )
                    out_line = 'from . import ' + match.group(1 )
                # Check we have not forget anything
                if "tf." in out_line or "tfds." in out_line or "tensorflow_datasets" in out_line:
                    raise ValueError(f'Error converting {out_line.strip()}' )
                if "GeneratorBasedBuilder" in out_line or "BeamBasedBuilder" in out_line:
                    is_builder = True
                out_lines.append(out_line )
            if is_builder or "wmt" in f_name:
                # We create a new directory for each dataset
                dir_name = f_name.replace('.py' , '' )
                output_dir = os.path.join(abs_datasets_path , dir_name )
                output_file = os.path.join(output_dir , f_name )
                os.makedirs(output_dir , exist_ok=True )
                self._logger.info(f'Adding directory {output_dir}' )
                imports_to_builder_map.update({imp: output_dir for imp in tfds_imports} )
            else:
                # Utilities will be moved at the end
                utils_files.append(output_file )
            if needs_manual_update:
                with_manual_update.append(output_file )
            with open(output_file , 'w' , encoding='utf-8' ) as f:
                f.writelines(out_lines )
            self._logger.info(f'Converted in {output_file}' )
        for utils_file in utils_files:
            try:
                f_name = os.path.basename(utils_file )
                dest_folder = imports_to_builder_map[f_name.replace('.py' , '' )]
                self._logger.info(f'Moving {dest_folder} to {utils_file}' )
                shutil.copy(utils_file , dest_folder )
            except KeyError:
                self._logger.error(f'Cannot find destination folder for {utils_file}. Please copy manually.' )
        if with_manual_update:
            for file_path in with_manual_update:
                self._logger.warning(
                    f'You need to manually update file {file_path} to remove configurations using \'TextEncoderConfig\'.' )
| 304
| 0
|
from unittest.mock import Mock, patch
from file_transfer.send_file import send_file
@patch('socket.socket' )
@patch('builtins.open' )
def _lowerCamelCase( file , sock ) -> None:
    '''simple docstring'''
    conn = Mock()
    sock.return_value.accept.return_value = conn, Mock()
    f = iter([1, None] )
    file.return_value.__enter__.return_value.read.side_effect = lambda _ : next(f )
    # ===== invoke =====
    send_file(filename='mytext.txt' , testing=True )
# ===== ensurance =====
sock.assert_called_once()
sock.return_value.bind.assert_called_once()
sock.return_value.listen.assert_called_once()
sock.return_value.accept.assert_called_once()
conn.recv.assert_called_once()
file.return_value.__enter__.assert_called_once()
file.return_value.__enter__.return_value.read.assert_called()
conn.send.assert_called_once()
conn.close.assert_called_once()
sock.return_value.shutdown.assert_called_once()
sock.return_value.close.assert_called_once()
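# Note on the decorator order above: @patch decorators are applied bottom-up, so the
# mock for 'builtins.open' arrives as the first argument (file) and the mock for
# 'socket.socket' as the second (sock).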
| 351
|
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
lowerCAmelCase = {
'''albert-base-v1''': '''https://huggingface.co/albert-base-v1/resolve/main/config.json''',
'''albert-large-v1''': '''https://huggingface.co/albert-large-v1/resolve/main/config.json''',
'''albert-xlarge-v1''': '''https://huggingface.co/albert-xlarge-v1/resolve/main/config.json''',
'''albert-xxlarge-v1''': '''https://huggingface.co/albert-xxlarge-v1/resolve/main/config.json''',
'''albert-base-v2''': '''https://huggingface.co/albert-base-v2/resolve/main/config.json''',
'''albert-large-v2''': '''https://huggingface.co/albert-large-v2/resolve/main/config.json''',
'''albert-xlarge-v2''': '''https://huggingface.co/albert-xlarge-v2/resolve/main/config.json''',
'''albert-xxlarge-v2''': '''https://huggingface.co/albert-xxlarge-v2/resolve/main/config.json''',
}
class AlbertConfig ( PretrainedConfig ):
    model_type ='''albert'''
    def __init__(self , vocab_size=3_0_0_0_0 , embedding_size=1_2_8 , hidden_size=4_0_9_6 , num_hidden_layers=1_2 , num_hidden_groups=1 , num_attention_heads=6_4 , intermediate_size=1_6_3_8_4 , inner_group_num=1 , hidden_act="gelu_new" , hidden_dropout_prob=0 , attention_probs_dropout_prob=0 , max_position_embeddings=5_1_2 , type_vocab_size=2 , initializer_range=0.02 , layer_norm_eps=1E-12 , classifier_dropout_prob=0.1 , position_embedding_type="absolute" , pad_token_id=0 , bos_token_id=2 , eos_token_id=3 , **kwargs , ):
        super().__init__(pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id , **kwargs )
        self.vocab_size = vocab_size
        self.embedding_size = embedding_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_hidden_groups = num_hidden_groups
        self.num_attention_heads = num_attention_heads
        self.inner_group_num = inner_group_num
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.classifier_dropout_prob = classifier_dropout_prob
        self.position_embedding_type = position_embedding_type
class AlbertOnnxConfig ( OnnxConfig ):
    @property
    def inputs(self ):
        if self.task == "multiple-choice":
            dynamic_axis = {0: 'batch', 1: 'choice', 2: 'sequence'}
        else:
            dynamic_axis = {0: 'batch', 1: 'sequence'}
        return OrderedDict(
            [
                ('input_ids', dynamic_axis),
                ('attention_mask', dynamic_axis),
                ('token_type_ids', dynamic_axis),
            ] )
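# Illustrative reading of the mapping above: each entry names a model input and the
# axes (batch, choice, sequence) that an ONNX exporter should treat as dynamic,
# i.e. free to vary at runtime rather than being fixed in the exported graph.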
| 304
| 0
|
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModelWithProjection, CLIPTokenizer
from diffusers import HeunDiscreteScheduler, PriorTransformer, ShapEPipeline
from diffusers.pipelines.shap_e import ShapERenderer
from diffusers.utils import load_numpy, slow
from diffusers.utils.testing_utils import require_torch_gpu, torch_device
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
class A(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = ShapEPipeline
    params = ["prompt"]
    batch_params = ["prompt"]
    required_optional_params = [
        "num_images_per_prompt",
        "num_inference_steps",
        "generator",
        "latents",
        "guidance_scale",
        "frame_size",
        "output_type",
        "return_dict",
    ]
    test_gpu_offload = False
    @property
    def text_embedder_hidden_size(self):
        return 32

    @property
    def time_input_dim(self):
        return 32

    @property
    def time_embed_dim(self):
        return self.time_input_dim * 4

    @property
    def renderer_dim(self):
        return 8
    @property
    def dummy_tokenizer(self):
        tokenizer = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip')
        return tokenizer
    @property
    def dummy_text_encoder(self):
        torch.manual_seed(0)
        config = CLIPTextConfig(
            bos_token_id=0, eos_token_id=2, hidden_size=self.text_embedder_hidden_size, projection_dim=self.text_embedder_hidden_size, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000, )
        return CLIPTextModelWithProjection(config)
    @property
    def dummy_prior(self):
        torch.manual_seed(0)
        model_kwargs = {
            'num_attention_heads': 2,
            'attention_head_dim': 16,
            'embedding_dim': self.time_input_dim,
            'num_embeddings': 32,
            'embedding_proj_dim': self.text_embedder_hidden_size,
            'time_embed_dim': self.time_embed_dim,
            'num_layers': 1,
            'clip_embed_dim': self.time_input_dim * 2,
            'additional_embeddings': 0,
            'time_embed_act_fn': 'gelu',
            'norm_in_type': 'layer',
            'encoder_hid_proj_type': None,
            'added_emb_type': None,
        }
        model = PriorTransformer(**model_kwargs)
        return model
    @property
    def dummy_renderer(self):
        torch.manual_seed(0)
        model_kwargs = {
            'param_shapes': (
                (self.renderer_dim, 93),
                (self.renderer_dim, 8),
                (self.renderer_dim, 8),
                (self.renderer_dim, 8),
            ),
            'd_latent': self.time_input_dim,
            'd_hidden': self.renderer_dim,
            'n_output': 12,
            'background': (
                0.1,
                0.1,
                0.1,
            ),
        }
        model = ShapERenderer(**model_kwargs)
        return model
    def get_dummy_components(self):
        prior = self.dummy_prior
        text_encoder = self.dummy_text_encoder
        tokenizer = self.dummy_tokenizer
        renderer = self.dummy_renderer
        scheduler = HeunDiscreteScheduler(
            beta_schedule='exp', num_train_timesteps=1024, prediction_type='sample', use_karras_sigmas=True, clip_sample=True, clip_sample_range=1.0, )
        components = {
            'prior': prior,
            'text_encoder': text_encoder,
            'tokenizer': tokenizer,
            'renderer': renderer,
            'scheduler': scheduler,
        }
        return components
    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith('mps'):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            'prompt': 'horse',
            'generator': generator,
            'num_inference_steps': 1,
            'frame_size': 32,
            'output_type': 'np',
        }
        return inputs
def _A (self ):
__lowercase= '''cpu'''
__lowercase= self.get_dummy_components()
__lowercase= self.pipeline_class(**snake_case_ )
__lowercase= pipe.to(snake_case_ )
pipe.set_progress_bar_config(disable=snake_case_ )
__lowercase= pipe(**self.get_dummy_inputs(snake_case_ ) )
__lowercase= output.images[0]
__lowercase= image[0, -3:, -3:, -1]
assert image.shape == (2_0, 3_2, 3_2, 3)
__lowercase= np.array(
[
0.00_03_92_16,
0.00_03_92_16,
0.00_03_92_16,
0.00_03_92_16,
0.00_03_92_16,
0.00_03_92_16,
0.00_03_92_16,
0.00_03_92_16,
0.00_03_92_16,
] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
def _A (self ):
# NOTE: Larger batch sizes cause this test to timeout, only test on smaller batches
self._test_inference_batch_consistent(batch_sizes=[1, 2] )
def _A (self ):
__lowercase= torch_device == '''cpu'''
__lowercase= True
self._test_inference_batch_single_identical(
batch_size=2 , test_max_difference=snake_case_ , relax_max_difference=snake_case_ , )
def _A (self ):
__lowercase= self.get_dummy_components()
__lowercase= self.pipeline_class(**snake_case_ )
__lowercase= pipe.to(snake_case_ )
pipe.set_progress_bar_config(disable=snake_case_ )
__lowercase= 1
__lowercase= 2
__lowercase= self.get_dummy_inputs(snake_case_ )
for key in inputs.keys():
if key in self.batch_params:
__lowercase= batch_size * [inputs[key]]
__lowercase= pipe(**snake_case_ , num_images_per_prompt=snake_case_ )[0]
assert images.shape[0] == batch_size * num_images_per_prompt
@slow
@require_torch_gpu
class A ( unittest.TestCase ):
def _A (self ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def _A (self ):
__lowercase= load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/shap_e/test_shap_e_np_out.npy' )
__lowercase= ShapEPipeline.from_pretrained('openai/shap-e' )
__lowercase= pipe.to(snake_case_ )
pipe.set_progress_bar_config(disable=snake_case_ )
__lowercase= torch.Generator(device=snake_case_ ).manual_seed(0 )
__lowercase= pipe(
'a shark' , generator=snake_case_ , guidance_scale=15.0 , num_inference_steps=6_4 , frame_size=6_4 , output_type='np' , ).images[0]
assert images.shape == (2_0, 6_4, 6_4, 3)
assert_mean_pixel_difference(snake_case_ , snake_case_ )
| 352
|
import argparse
import re
import requests
import torch
# git clone https://github.com/salesforce/BLIP.git
from models.blip import blip_decoder
from models.blip_itm import blip_itm
from models.blip_vqa import blip_vqa
from PIL import Image
from torchvision import transforms
from torchvision.transforms.functional import InterpolationMode
from transformers import (
BertTokenizer,
BlipConfig,
BlipForConditionalGeneration,
BlipForImageTextRetrieval,
BlipForQuestionAnswering,
)
def load_demo_image(image_size, device):
    '''Download the BLIP demo image and preprocess it to a batched tensor.'''
    img_url = 'https://storage.googleapis.com/sfr-vision-language-research/BLIP/demo.jpg'
    raw_image = Image.open(requests.get(img_url, stream=True).raw).convert('RGB')
    transform = transforms.Compose(
        [
            transforms.Resize((image_size, image_size), interpolation=InterpolationMode.BICUBIC),
            transforms.ToTensor(),
            transforms.Normalize((0.48145466, 0.4578275, 0.40821073), (0.26862954, 0.26130258, 0.27577711)),
        ]
    )
    image = transform(raw_image).unsqueeze(0).to(device)
    return image
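

# Note (added for clarity): load_demo_image(image_size=384, device='cpu') returns a
# (1, 3, 384, 384) float tensor; the normalization constants above are the
# CLIP-style image statistics used by BLIP's vision encoder.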
def rename_key(key):
    '''Map a key from the original BLIP state dict to the HF naming scheme.'''
    if "visual_encoder" in key:
        key = re.sub('visual_encoder*', 'vision_model.encoder', key)
    if "blocks" in key:
        key = re.sub(r'blocks', 'layers', key)
    if "attn" in key:
        key = re.sub(r'attn', 'self_attn', key)
    if "norm1" in key:
        key = re.sub(r'norm1', 'layer_norm1', key)
    if "norm2" in key:
        key = re.sub(r'norm2', 'layer_norm2', key)
    if "encoder.norm" in key:
        key = re.sub(r'encoder.norm', 'post_layernorm', key)
    if "encoder.patch_embed.proj" in key:
        key = re.sub(r'encoder.patch_embed.proj', 'embeddings.patch_embedding', key)
    if "encoder.pos_embed" in key:
        key = re.sub(r'encoder.pos_embed', 'embeddings.position_embedding', key)
    if "encoder.cls_token" in key:
        key = re.sub(r'encoder.cls_token', 'embeddings.class_embedding', key)
    if "self_attn" in key:
        key = re.sub(r'self_attn.proj', 'self_attn.projection', key)
    return key
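

# Example mapping (for illustration only):
#   rename_key('visual_encoder.blocks.0.attn.qkv.weight')
#   -> 'vision_model.encoder.layers.0.self_attn.qkv.weight'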
@torch.no_grad()
def convert_blip_checkpoint(pytorch_dump_folder_path, config_path=None):
    '''Convert the original BLIP checkpoints (captioning, VQA, ITM) to the HF format.'''
if config_path is not None:
__lowercase= BlipConfig.from_pretrained(lowercase__ )
else:
__lowercase= BlipConfig(projection_dim=5_1_2 , text_config={} , vision_config={} )
__lowercase= BlipForConditionalGeneration(lowercase__ ).eval()
__lowercase= 'https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_base_capfilt_large.pth'
__lowercase= blip_decoder(pretrained=lowercase__ , image_size=3_8_4 , vit='base' )
__lowercase= pt_model.eval()
__lowercase= pt_model.state_dict()
for key in modified_state_dict.copy():
__lowercase= modified_state_dict.pop(lowercase__ )
__lowercase= rename_key(lowercase__ )
__lowercase= value
hf_model.load_state_dict(lowercase__ )
__lowercase= 3_8_4
__lowercase= load_demo_image(image_size=lowercase__ , device='cpu' )
__lowercase= BertTokenizer.from_pretrained('bert-base-uncased' )
__lowercase= tokenizer(['a picture of'] ).input_ids
__lowercase= hf_model.generate(lowercase__ , lowercase__ )
assert out[0].tolist() == [3_0_5_2_2, 1_0_3_7, 3_8_6_1, 1_9_9_7, 1_0_3_7, 2_4_5_0, 3_5_6_4, 2_0_0_6, 1_9_9_6, 3_5_0_9, 2_0_0_7, 2_0_1_4, 3_8_9_9, 1_0_2]
__lowercase= hf_model.generate(lowercase__ )
assert out[0].tolist() == [3_0_5_2_2, 1_0_3_7, 2_4_5_0, 3_5_6_4, 2_0_0_6, 1_9_9_6, 3_5_0_9, 2_0_0_7, 2_0_1_4, 3_8_9_9, 1_0_2]
if pytorch_dump_folder_path is not None:
hf_model.save_pretrained(lowercase__ )
# model_url = 'https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_vqa.pth'
__lowercase= (
'https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_base_vqa_capfilt_large.pth'
)
__lowercase= blip_vqa(pretrained=lowercase__ , image_size=lowercase__ , vit='base' )
vqa_model.eval()
__lowercase= vqa_model.state_dict()
for key in modified_state_dict.copy():
__lowercase= modified_state_dict.pop(lowercase__ )
__lowercase= rename_key(lowercase__ )
__lowercase= value
__lowercase= BlipForQuestionAnswering(lowercase__ )
hf_vqa_model.load_state_dict(lowercase__ )
__lowercase= ['How many dogs are in this image?']
__lowercase= tokenizer(lowercase__ , return_tensors='pt' ).input_ids
__lowercase= hf_vqa_model.generate(lowercase__ , lowercase__ )
print(tokenizer.decode(answer[0] ) )
assert tokenizer.decode(answer[0] ) == "[UNK] 1 [SEP]"
if pytorch_dump_folder_path is not None:
hf_vqa_model.save_pretrained(pytorch_dump_folder_path + '_vqa' )
__lowercase= 'https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_base_retrieval_coco.pth'
__lowercase= blip_itm(pretrained=lowercase__ , image_size=lowercase__ , vit='base' )
itm_model.eval()
__lowercase= itm_model.state_dict()
for key in modified_state_dict.copy():
__lowercase= modified_state_dict.pop(lowercase__ )
__lowercase= rename_key(lowercase__ )
__lowercase= value
__lowercase= BlipForImageTextRetrieval(lowercase__ )
__lowercase= ['A picture of a woman with a dog sitting in a beach']
__lowercase= tokenizer(
lowercase__ , return_tensors='pt' , padding='max_length' , truncation=lowercase__ , max_length=3_5 , ).input_ids
hf_itm_model.load_state_dict(lowercase__ )
hf_itm_model.eval()
__lowercase= hf_itm_model(lowercase__ , lowercase__ , use_itm_head=lowercase__ )
__lowercase= hf_itm_model(lowercase__ , lowercase__ , use_itm_head=lowercase__ )
assert out[0].item() == 0.2110_6874_9427_7954
assert torch.nn.functional.softmax(out_itm[0] , dim=1 )[:, 1].item() == 0.4_5698_8453_8650_5127
if pytorch_dump_folder_path is not None:
hf_itm_model.save_pretrained(pytorch_dump_folder_path + '_itm' )
if __name__ == "__main__":
lowerCAmelCase = argparse.ArgumentParser()
parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
parser.add_argument('''--config_path''', default=None, type=str, help='''Path to hf config.json of model to convert''')
    args = parser.parse_args()
    convert_blip_checkpoint(args.pytorch_dump_folder_path, args.config_path)
| 304
| 0
|
import unittest

from transformers import load_tool

from .test_tools_common import ToolTesterMixin


class A(unittest.TestCase, ToolTesterMixin):
    def setUp(self):
        self.tool = load_tool('text-classification')
        self.tool.setup()
        self.remote_tool = load_tool('text-classification', remote=True)

    def test_exact_match_arg(self):
        result = self.tool('That\'s quite cool', ['positive', 'negative'])
        self.assertEqual(result, 'positive')

    def test_exact_match_arg_remote(self):
        result = self.remote_tool('That\'s quite cool', ['positive', 'negative'])
        self.assertEqual(result, 'positive')

    def test_exact_match_kwarg(self):
        result = self.tool(text='That\'s quite cool', labels=['positive', 'negative'])
        self.assertEqual(result, 'positive')

    def test_exact_match_kwarg_remote(self):
        result = self.remote_tool(text='That\'s quite cool', labels=['positive', 'negative'])
        self.assertEqual(result, 'positive')
| 353
|
from __future__ import annotations

from collections.abc import Iterable, Iterator
from dataclasses import dataclass


test_data_odd = (3, 9, -11, 0, 7, 5, 1, -1)
test_data_even = (4, 6, 2, 0, 8, 10, 3, -2)


@dataclass
class Node:
    data: int
    next_node: Node | None


class SortedLinkedList:
    def __init__(self, ints: Iterable[int]):
        self.head: Node | None = None
        for i in sorted(ints, reverse=True):
            self.head = Node(i, self.head)

    def __iter__(self) -> Iterator[int]:
        node = self.head
        while node:
            yield node.data
            node = node.next_node

    def __len__(self):
        return sum(1 for _ in self)

    def __str__(self):
        return " -> ".join([str(node) for node in self])
def merge_lists(sll_one: SortedLinkedList, sll_two: SortedLinkedList) -> SortedLinkedList:
    '''Merge two sorted linked lists into a new sorted linked list.'''
    return SortedLinkedList(list(sll_one) + list(sll_two))
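

# Illustrative usage (added for clarity, not in the original module):
#   >>> str(merge_lists(SortedLinkedList([3, 1]), SortedLinkedList([0, 2])))
#   '0 -> 1 -> 2 -> 3'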
if __name__ == "__main__":
    import doctest

    doctest.testmod()
    SSL = SortedLinkedList
    print(merge_lists(SSL(test_data_odd), SSL(test_data_even)))
| 304
| 0
|
def is_pangram(
    input_str: str = "The quick brown fox jumps over the lazy dog",
) -> bool:
    '''A pangram uses every letter of the alphabet at least once.'''
    frequency = set()
    # Replace all the whitespace in our sentence
    input_str = input_str.replace(' ', '')
    for alpha in input_str:
        if "a" <= alpha.lower() <= "z":
            frequency.add(alpha.lower())
    return len(frequency) == 26


def is_pangram_faster(
    input_str: str = "The quick brown fox jumps over the lazy dog",
) -> bool:
    '''Flag-array variant: mark each letter seen, then check all 26 flags.'''
    flag = [False] * 26
    for char in input_str:
        if char.islower():
            flag[ord(char) - ord('a')] = True
        elif char.isupper():
            flag[ord(char) - ord('A')] = True
    return all(flag)


def is_pangram_fastest(
    input_str: str = "The quick brown fox jumps over the lazy dog",
) -> bool:
    '''Set-comprehension variant: count distinct alphabetic characters.'''
    return len({char for char in input_str.lower() if char.isalpha()}) == 26
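

def _sanity_check_pangram_variants() -> None:
    '''Small illustrative check (added for clarity, not in the original module):
    all three variants should agree on obvious inputs.'''
    for checker in (is_pangram, is_pangram_faster, is_pangram_fastest):
        assert checker()  # the default sentence is a pangram
        assert not checker("Hello world")  # most letters are missing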
def benchmark() -> None:
    '''Time the three pangram checks against each other.'''
    from timeit import timeit

    setup = 'from __main__ import is_pangram, is_pangram_faster, is_pangram_fastest'
    print(timeit('is_pangram()', setup=setup))
    print(timeit('is_pangram_faster()', setup=setup))
    print(timeit('is_pangram_fastest()', setup=setup))
    # 5.348480500048026, 2.6477354579837993, 1.8470395830227062
    # 5.036091582966037, 2.644472333951853, 1.8869528750656173


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    benchmark()
| 354
|
from __future__ import annotations

from collections.abc import Callable


def trapezoidal_area(
    fnc: Callable[[int | float], int | float],
    x_start: int | float,
    x_end: int | float,
    steps: int = 100,
) -> float:
    '''Approximate the integral of fnc over [x_start, x_end] with the trapezoidal rule.'''
    xa = x_start
    fxa = fnc(xa)
    area = 0.0
    for _ in range(steps):
        # Approximates small segments of curve as linear and solve
        # for trapezoidal area
        xb = (x_end - x_start) / steps + xa
        fxb = fnc(xb)
        area += abs(fxb + fxa) * (xb - xa) / 2
        # Increment step
        xa = xb
        fxa = fxb
    return area
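

# Worked example (added for clarity): the rule is exact for linear functions,
# so a single step already recovers the integral of f(x) = x over [0, 1]:
#   trapezoidal_area(lambda x: x, 0.0, 1.0, steps=1) == abs(1 + 0) * (1 - 0) / 2 == 0.5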
if __name__ == "__main__":

    def f(x):
        return x**3 + x**2

    print('f(x) = x^3 + x^2')
    print('The area between the curve, x = -5, x = 5 and the x axis is:')
    i = 10
    while i <= 100000:
        print(f'with {i} steps: {trapezoidal_area(f, -5, 5, i)}')
        i *= 10
| 304
| 0
|
import unittest
from pathlib import Path
from tempfile import TemporaryDirectory
from transformers import AutoConfig, TFGPTaLMHeadModel, is_keras_nlp_available, is_tf_available
from transformers.models.gpta.tokenization_gpta import GPTaTokenizer
from transformers.testing_utils import require_keras_nlp, require_tf, slow
if is_tf_available():
import tensorflow as tf
if is_keras_nlp_available():
from transformers.models.gpta import TFGPTaTokenizer
TOKENIZER_CHECKPOINTS = ['gpt2']
TINY_MODEL_CHECKPOINT = 'gpt2'
if is_tf_available():
    class ModelToSave(tf.Module):
        def __init__(self, tokenizer):
            super().__init__()
            self.tokenizer = tokenizer
            config = AutoConfig.from_pretrained(TINY_MODEL_CHECKPOINT)
            self.model = TFGPTaLMHeadModel.from_config(config)
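
        # Note (added for clarity): the input_signature below pins serving() to a
        # 1-D string tensor, so the tokenizer + model can be exported together
        # with tf.saved_model.save and later called on raw text.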
        @tf.function(input_signature=(tf.TensorSpec((None,), tf.string, name='text'),))
        def serving(self, text):
            tokenized = self.tokenizer(text)
            input_ids_dense = tokenized['input_ids'].to_tensor()
            input_mask = tf.cast(input_ids_dense > 0, tf.int32)
            # input_mask = tf.reshape(input_mask, [-1, MAX_SEQ_LEN])
            outputs = self.model(input_ids=input_ids_dense, attention_mask=input_mask)['logits']
            return outputs
@require_tf
@require_keras_nlp
class A ( unittest.TestCase ):
    def setUp(self):
        super().setUp()
        self.tokenizers = [GPTaTokenizer.from_pretrained(checkpoint) for checkpoint in TOKENIZER_CHECKPOINTS]
        self.tf_tokenizers = [TFGPTaTokenizer.from_pretrained(checkpoint) for checkpoint in TOKENIZER_CHECKPOINTS]
        assert len(self.tokenizers) == len(self.tf_tokenizers)
        self.test_sentences = [
'This is a straightforward English test sentence.',
'This one has some weird characters\rto\nsee\r\nif those\u00E9break things.',
'Now we\'re going to add some Chinese: 一 二 三 一二三',
'And some much more rare Chinese: 齉 堃 齉堃',
'Je vais aussi écrire en français pour tester les accents',
'Classical Irish also has some unusual characters, so in they go: Gaelaċ, ꝼ',
]
        self.paired_sentences = list(zip(self.test_sentences, self.test_sentences[::-1]))
def _A (self ):
for tokenizer, tf_tokenizer in zip(self.tokenizers , self.tf_tokenizers ):
for test_inputs in self.test_sentences:
__lowercase= tokenizer([test_inputs] , return_tensors='tf' )
__lowercase= tf_tokenizer([test_inputs] )
for key in python_outputs.keys():
# convert them to numpy to avoid messing with ragged tensors
__lowercase= python_outputs[key].numpy()
__lowercase= tf_outputs[key].numpy()
self.assertTrue(tf.reduce_all(python_outputs_values.shape == tf_outputs_values.shape ) )
self.assertTrue(tf.reduce_all(tf.cast(UpperCamelCase_ , tf.intaa ) == tf_outputs_values ) )
@slow
def _A (self ):
for tf_tokenizer in self.tf_tokenizers:
__lowercase= tf.function(UpperCamelCase_ )
for test_inputs in self.test_sentences:
__lowercase= tf.constant(UpperCamelCase_ )
__lowercase= compiled_tokenizer(UpperCamelCase_ )
__lowercase= tf_tokenizer(UpperCamelCase_ )
for key in eager_outputs.keys():
self.assertTrue(tf.reduce_all(eager_outputs[key] == compiled_outputs[key] ) )
@slow
def _A (self ):
for tf_tokenizer in self.tf_tokenizers:
__lowercase= ModelToSave(tokenizer=UpperCamelCase_ )
__lowercase= tf.convert_to_tensor([self.test_sentences[0]] )
__lowercase= model.serving(UpperCamelCase_ ) # Build model with some sample inputs
with TemporaryDirectory() as tempdir:
__lowercase= Path(UpperCamelCase_ ) / 'saved.model'
tf.saved_model.save(UpperCamelCase_ , UpperCamelCase_ , signatures={'serving_default': model.serving} )
__lowercase= tf.saved_model.load(UpperCamelCase_ )
__lowercase= loaded_model.signatures['serving_default'](UpperCamelCase_ )['output_0']
# We may see small differences because the loaded model is compiled, so we need an epsilon for the test
self.assertTrue(tf.reduce_all(out == loaded_output ) )
@slow
def _A (self ):
for tf_tokenizer in self.tf_tokenizers:
__lowercase= tf.convert_to_tensor([self.test_sentences[0]] )
__lowercase= tf_tokenizer(UpperCamelCase_ ) # Build model with some sample inputs
__lowercase= tf_tokenizer.get_config()
__lowercase= TFGPTaTokenizer.from_config(UpperCamelCase_ )
__lowercase= model_from_config(UpperCamelCase_ )
for key in from_config_output.keys():
self.assertTrue(tf.reduce_all(from_config_output[key] == out[key] ) )
@slow
def _A (self ):
for tf_tokenizer in self.tf_tokenizers:
# for the test to run
__lowercase= 1_2_3_1_2_3
for max_length in [3, 5, 1_0_2_4]:
__lowercase= tf.convert_to_tensor([self.test_sentences[0]] )
__lowercase= tf_tokenizer(UpperCamelCase_ , max_length=UpperCamelCase_ )
__lowercase= out['input_ids'].numpy().shape[1]
assert out_length == max_length
| 355
|
import unittest
from transformers import XLMConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
XLMForMultipleChoice,
XLMForQuestionAnswering,
XLMForQuestionAnsweringSimple,
XLMForSequenceClassification,
XLMForTokenClassification,
XLMModel,
XLMWithLMHeadModel,
)
from transformers.models.xlm.modeling_xlm import XLM_PRETRAINED_MODEL_ARCHIVE_LIST
class A :
def __init__(self , lowerCAmelCase , lowerCAmelCase=1_3 , lowerCAmelCase=7 , lowerCAmelCase=True , lowerCAmelCase=True , lowerCAmelCase=True , lowerCAmelCase=True , lowerCAmelCase=True , lowerCAmelCase=False , lowerCAmelCase=False , lowerCAmelCase=False , lowerCAmelCase=2 , lowerCAmelCase=9_9 , lowerCAmelCase=0 , lowerCAmelCase=3_2 , lowerCAmelCase=5 , lowerCAmelCase=4 , lowerCAmelCase=0.1 , lowerCAmelCase=0.1 , lowerCAmelCase=5_1_2 , lowerCAmelCase=2 , lowerCAmelCase=0.02 , lowerCAmelCase=2 , lowerCAmelCase=4 , lowerCAmelCase="last" , lowerCAmelCase=True , lowerCAmelCase=None , lowerCAmelCase=0 , ):
__lowercase= parent
__lowercase= batch_size
__lowercase= seq_length
__lowercase= is_training
__lowercase= use_input_lengths
__lowercase= use_token_type_ids
__lowercase= use_labels
__lowercase= gelu_activation
__lowercase= sinusoidal_embeddings
__lowercase= causal
__lowercase= asm
__lowercase= n_langs
__lowercase= vocab_size
__lowercase= n_special
__lowercase= hidden_size
__lowercase= num_hidden_layers
__lowercase= num_attention_heads
__lowercase= hidden_dropout_prob
__lowercase= attention_probs_dropout_prob
__lowercase= max_position_embeddings
__lowercase= type_sequence_label_size
__lowercase= initializer_range
__lowercase= num_labels
__lowercase= num_choices
__lowercase= summary_type
__lowercase= use_proj
__lowercase= scope
__lowercase= bos_token_id
def _A (self ):
__lowercase= ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
__lowercase= random_attention_mask([self.batch_size, self.seq_length] )
__lowercase= None
if self.use_input_lengths:
__lowercase= (
ids_tensor([self.batch_size] , vocab_size=2 ) + self.seq_length - 2
) # small variation of seq_length
__lowercase= None
if self.use_token_type_ids:
__lowercase= ids_tensor([self.batch_size, self.seq_length] , self.n_langs )
__lowercase= None
__lowercase= None
__lowercase= None
if self.use_labels:
__lowercase= ids_tensor([self.batch_size] , self.type_sequence_label_size )
__lowercase= ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
__lowercase= ids_tensor([self.batch_size] , 2 ).float()
__lowercase= ids_tensor([self.batch_size] , self.num_choices )
__lowercase= self.get_config()
return (
config,
input_ids,
token_type_ids,
input_lengths,
sequence_labels,
token_labels,
is_impossible_labels,
choice_labels,
input_mask,
)
def _A (self ):
return XLMConfig(
vocab_size=self.vocab_size , n_special=self.n_special , emb_dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , gelu_activation=self.gelu_activation , sinusoidal_embeddings=self.sinusoidal_embeddings , asm=self.asm , causal=self.causal , n_langs=self.n_langs , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , summary_type=self.summary_type , use_proj=self.use_proj , num_labels=self.num_labels , bos_token_id=self.bos_token_id , )
def _A (self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , ):
__lowercase= XLMModel(config=lowerCAmelCase )
model.to(lowerCAmelCase )
model.eval()
__lowercase= model(lowerCAmelCase , lengths=lowerCAmelCase , langs=lowerCAmelCase )
__lowercase= model(lowerCAmelCase , langs=lowerCAmelCase )
__lowercase= model(lowerCAmelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def _A (self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , ):
__lowercase= XLMWithLMHeadModel(lowerCAmelCase )
model.to(lowerCAmelCase )
model.eval()
__lowercase= model(lowerCAmelCase , token_type_ids=lowerCAmelCase , labels=lowerCAmelCase )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def _A (self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , ):
__lowercase= XLMForQuestionAnsweringSimple(lowerCAmelCase )
model.to(lowerCAmelCase )
model.eval()
__lowercase= model(lowerCAmelCase )
__lowercase= model(lowerCAmelCase , start_positions=lowerCAmelCase , end_positions=lowerCAmelCase )
__lowercase= outputs
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def _A (self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , ):
__lowercase= XLMForQuestionAnswering(lowerCAmelCase )
model.to(lowerCAmelCase )
model.eval()
__lowercase= model(lowerCAmelCase )
__lowercase= model(
lowerCAmelCase , start_positions=lowerCAmelCase , end_positions=lowerCAmelCase , cls_index=lowerCAmelCase , is_impossible=lowerCAmelCase , p_mask=lowerCAmelCase , )
__lowercase= model(
lowerCAmelCase , start_positions=lowerCAmelCase , end_positions=lowerCAmelCase , cls_index=lowerCAmelCase , is_impossible=lowerCAmelCase , )
((__lowercase), )= result_with_labels.to_tuple()
__lowercase= model(lowerCAmelCase , start_positions=lowerCAmelCase , end_positions=lowerCAmelCase )
((__lowercase), )= result_with_labels.to_tuple()
self.parent.assertEqual(result_with_labels.loss.shape , () )
self.parent.assertEqual(result.start_top_log_probs.shape , (self.batch_size, model.config.start_n_top) )
self.parent.assertEqual(result.start_top_index.shape , (self.batch_size, model.config.start_n_top) )
self.parent.assertEqual(
result.end_top_log_probs.shape , (self.batch_size, model.config.start_n_top * model.config.end_n_top) )
self.parent.assertEqual(
result.end_top_index.shape , (self.batch_size, model.config.start_n_top * model.config.end_n_top) )
self.parent.assertEqual(result.cls_logits.shape , (self.batch_size,) )
def _A (self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , ):
__lowercase= XLMForSequenceClassification(lowerCAmelCase )
model.to(lowerCAmelCase )
model.eval()
__lowercase= model(lowerCAmelCase )
__lowercase= model(lowerCAmelCase , labels=lowerCAmelCase )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def _A (self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , ):
__lowercase= self.num_labels
__lowercase= XLMForTokenClassification(lowerCAmelCase )
model.to(lowerCAmelCase )
model.eval()
__lowercase= model(lowerCAmelCase , attention_mask=lowerCAmelCase , labels=lowerCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def _A (self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , ):
__lowercase= self.num_choices
__lowercase= XLMForMultipleChoice(config=lowerCAmelCase )
model.to(lowerCAmelCase )
model.eval()
__lowercase= input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
__lowercase= token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
__lowercase= input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
__lowercase= model(
lowerCAmelCase , attention_mask=lowerCAmelCase , token_type_ids=lowerCAmelCase , labels=lowerCAmelCase , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_lengths,
            sequence_labels,
            token_labels,
            is_impossible_labels,
            choice_labels,
            input_mask,
        ) = config_and_inputs
        inputs_dict = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'lengths': input_lengths}
        return config, inputs_dict
@require_torch
class A ( ModelTesterMixin , GenerationTesterMixin , PipelineTesterMixin , unittest.TestCase ):
UpperCamelCase_ : int =(
(
XLMModel,
XLMWithLMHeadModel,
XLMForQuestionAnswering,
XLMForSequenceClassification,
XLMForQuestionAnsweringSimple,
XLMForTokenClassification,
XLMForMultipleChoice,
)
if is_torch_available()
else ()
)
UpperCamelCase_ : Dict =(
(XLMWithLMHeadModel,) if is_torch_available() else ()
) # TODO (PVP): Check other models whether language generation is also applicable
UpperCamelCase_ : str =(
{
'''feature-extraction''': XLMModel,
'''fill-mask''': XLMWithLMHeadModel,
'''question-answering''': XLMForQuestionAnsweringSimple,
'''text-classification''': XLMForSequenceClassification,
'''text-generation''': XLMWithLMHeadModel,
'''token-classification''': XLMForTokenClassification,
'''zero-shot''': XLMForSequenceClassification,
}
if is_torch_available()
else {}
)
    def is_pipeline_test_to_skip(self , pipeline_test_casse_name , config_class , model_architecture , tokenizer_name , processor_name ):
if (
pipeline_test_casse_name == "QAPipelineTests"
and tokenizer_name is not None
and not tokenizer_name.endswith('Fast' )
):
# `QAPipelineTests` fails for a few models when the slower tokenizer are used.
# (The slower tokenizers were never used for pipeline tests before the pipeline testing rework)
# TODO: check (and possibly fix) the `QAPipelineTests` with slower tokenizer
return True
return False
    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)
        if return_labels:
            if model_class.__name__ == "XLMForQuestionAnswering":
                inputs_dict['start_positions'] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device)
                inputs_dict['end_positions'] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device)
        return inputs_dict
def _A (self ):
__lowercase= XLMModelTester(self )
__lowercase= ConfigTester(self , config_class=lowerCAmelCase , emb_dim=3_7 )
def _A (self ):
self.config_tester.run_common_tests()
def _A (self ):
__lowercase= self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_model(*lowerCAmelCase )
def _A (self ):
__lowercase= self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_lm_head(*lowerCAmelCase )
def _A (self ):
__lowercase= self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_simple_qa(*lowerCAmelCase )
def _A (self ):
__lowercase= self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_qa(*lowerCAmelCase )
def _A (self ):
__lowercase= self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_sequence_classif(*lowerCAmelCase )
def _A (self ):
__lowercase= self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_token_classif(*lowerCAmelCase )
def _A (self ):
__lowercase= self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_for_multiple_choice(*lowerCAmelCase )
def _A (self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase=False , lowerCAmelCase=1 ):
self.assertIsInstance(lowerCAmelCase , lowerCAmelCase )
self.assertListEqual(
[isinstance(lowerCAmelCase , lowerCAmelCase ) for iter_attentions in attentions] , [True] * len(lowerCAmelCase ) )
self.assertEqual(len(lowerCAmelCase ) , (max_length - min_length) * num_beam_groups )
for idx, iter_attentions in enumerate(lowerCAmelCase ):
# adds PAD dummy token
__lowercase= min_length + idx + 1
__lowercase= min_length + idx + 1
__lowercase= (
batch_size * num_beam_groups,
config.num_attention_heads,
tgt_len,
src_len,
)
# check attn size
self.assertListEqual(
[layer_attention.shape for layer_attention in iter_attentions] , [expected_shape] * len(lowerCAmelCase ) )
def _A (self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase=False , lowerCAmelCase=1 ):
self.assertIsInstance(lowerCAmelCase , lowerCAmelCase )
self.assertListEqual(
[isinstance(lowerCAmelCase , lowerCAmelCase ) for iter_hidden_states in hidden_states] , [True] * len(lowerCAmelCase ) , )
self.assertEqual(len(lowerCAmelCase ) , (max_length - min_length) * num_beam_groups )
for idx, iter_hidden_states in enumerate(lowerCAmelCase ):
# adds PAD dummy token
__lowercase= min_length + idx + 1
__lowercase= (batch_size * num_beam_groups, seq_len, config.hidden_size)
# check hidden size
self.assertListEqual(
[layer_hidden_states.shape for layer_hidden_states in iter_hidden_states] , [expected_shape] * len(lowerCAmelCase ) , )
pass
@slow
def _A (self ):
for model_name in XLM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__lowercase= XLMModel.from_pretrained(lowerCAmelCase )
self.assertIsNotNone(lowerCAmelCase )
@require_torch
class A ( unittest.TestCase ):
@slow
def _A (self ):
__lowercase= XLMWithLMHeadModel.from_pretrained('xlm-mlm-en-2048' )
model.to(lowerCAmelCase )
__lowercase= torch.tensor([[1_4, 4_4_7]] , dtype=torch.long , device=lowerCAmelCase ) # the president
__lowercase= [
1_4,
4_4_7,
1_4,
4_4_7,
1_4,
4_4_7,
1_4,
4_4_7,
1_4,
4_4_7,
1_4,
4_4_7,
1_4,
4_4_7,
1_4,
4_4_7,
1_4,
4_4_7,
1_4,
4_4_7,
] # the president the president the president the president the president the president the president the president the president the president
# TODO(PVP): this and other input_ids I tried for generation give pretty bad results. Not sure why. Model might just not be made for auto-regressive inference
__lowercase= model.generate(lowerCAmelCase , do_sample=lowerCAmelCase )
self.assertListEqual(output_ids[0].cpu().numpy().tolist() , lowerCAmelCase )
| 304
| 0
|
import unittest

from transformers import load_tool
from transformers.utils import is_torch_available


if is_torch_available():
    import torch

from transformers.testing_utils import require_torch

from .test_tools_common import ToolTesterMixin


@require_torch
class A(unittest.TestCase, ToolTesterMixin):
    def setUp(self):
        self.tool = load_tool('text-to-speech')
        self.tool.setup()

    def test_exact_match_arg(self):
        torch.manual_seed(0)
        result = self.tool('hey')
        resulting_tensor = result.to_raw()
        self.assertTrue(
            torch.allclose(
                resulting_tensor[:3], torch.tensor([-0.0005966668832115829, -0.0003657640190795064, -0.00013439502799883485]), ) )

    def test_exact_match_kwarg(self):
        torch.manual_seed(0)
        result = self.tool('hey')
        resulting_tensor = result.to_raw()
        self.assertTrue(
            torch.allclose(
                resulting_tensor[:3], torch.tensor([-0.0005966668832115829, -0.0003657640190795064, -0.00013439502799883485]), ) )
| 356
|
from __future__ import annotations

import json

import requests
from bs4 import BeautifulSoup
from fake_useragent import UserAgent


headers = {'UserAgent': UserAgent().random}


def extract_user_profile(script) -> dict:
    '''Extract the shared-data JSON blob embedded in an Instagram profile page.'''
    data = script.contents[0]
    info = json.loads(data[data.find('{"config"') : -1])
    return info["entry_data"]["ProfilePage"][0]["graphql"]["user"]


class InstagramUser:
    def __init__(self, username):
        self.url = f'https://www.instagram.com/{username}/'
        self.user_data = self.get_json()

    def get_json(self) -> dict:
        html = requests.get(self.url, headers=headers).text
        scripts = BeautifulSoup(html, 'html.parser').find_all('script')
        try:
            return extract_user_profile(scripts[4])
        except (json.decoder.JSONDecodeError, KeyError):
            return extract_user_profile(scripts[3])

    def __repr__(self):
        return f"{self.__class__.__name__}('{self.username}')"

    def __str__(self):
        return f'{self.fullname} ({self.username}) is {self.biography}'

    @property
    def username(self):
        return self.user_data["username"]

    @property
    def fullname(self):
        return self.user_data["full_name"]

    @property
    def biography(self):
        return self.user_data["biography"]

    @property
    def email(self):
        return self.user_data["business_email"]

    @property
    def website(self):
        return self.user_data["external_url"]

    @property
    def number_of_followers(self):
        return self.user_data["edge_followed_by"]["count"]

    @property
    def number_of_followings(self):
        return self.user_data["edge_follow"]["count"]

    @property
    def number_of_posts(self):
        return self.user_data["edge_owner_to_timeline_media"]["count"]

    @property
    def profile_picture_url(self):
        return self.user_data["profile_pic_url_hd"]

    @property
    def is_verified(self):
        return self.user_data["is_verified"]

    @property
    def is_private(self):
        return self.user_data["is_private"]


def test_instagram_user(username: str = "github") -> None:
    '''Light smoke test against the live site (skipped on CI).'''
    import os

    if os.environ.get('CI'):
        return  # test failing on GitHub Actions
    instagram_user = InstagramUser(username)
    assert instagram_user.user_data
    assert isinstance(instagram_user.user_data, dict)
    assert instagram_user.username == username
    if username != "github":
        return
    assert instagram_user.fullname == "GitHub"
    assert instagram_user.biography == "Built for developers."
    assert instagram_user.number_of_posts > 150
    assert instagram_user.number_of_followers > 120000
    assert instagram_user.number_of_followings > 15
    assert instagram_user.email == "support@github.com"
    assert instagram_user.website == "https://github.com/readme"
    assert instagram_user.profile_picture_url.startswith('https://instagram.')
    assert instagram_user.is_verified is True
    assert instagram_user.is_private is False


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    instagram_user = InstagramUser('github')
    print(instagram_user)
    print(f'{instagram_user.number_of_posts = }')
    print(f'{instagram_user.number_of_followers = }')
    print(f'{instagram_user.number_of_followings = }')
    print(f'{instagram_user.email = }')
    print(f'{instagram_user.website = }')
    print(f'{instagram_user.profile_picture_url = }')
    print(f'{instagram_user.is_verified = }')
    print(f'{instagram_user.is_private = }')
| 304
| 0
|
import random
import unittest
import torch
from diffusers import IFInpaintingPipeline
from diffusers.utils import floats_tensor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import skip_mps, torch_device
from ..pipeline_params import (
TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_INPAINTING_PARAMS,
)
from ..test_pipelines_common import PipelineTesterMixin
from . import IFPipelineTesterMixin
@skip_mps
class A(PipelineTesterMixin, IFPipelineTesterMixin, unittest.TestCase):
    pipeline_class = IFInpaintingPipeline
    params = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS - {"width", "height"}
    batch_params = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS
    required_optional_params = PipelineTesterMixin.required_optional_params - {"latents"}

    def get_dummy_components(self):
        return self._get_dummy_components()
    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith('mps'):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)
        mask_image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)
        inputs = {
            'prompt': 'A painting of a squirrel eating a burger',
            'image': image,
            'mask_image': mask_image,
            'generator': generator,
            'num_inference_steps': 2,
            'output_type': 'numpy',
        }
        return inputs
@unittest.skipIf(
torch_device != 'cuda' or not is_xformers_available() , reason='XFormers attention is only available with CUDA and `xformers` installed' , )
def _A (self ):
self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1E-3 )
def _A (self ):
self._test_save_load_optional_components()
@unittest.skipIf(torch_device != 'cuda' , reason='float16 requires CUDA' )
def _A (self ):
# Due to non-determinism in save load of the hf-internal-testing/tiny-random-t5 text encoder
super().test_save_load_floataa(expected_max_diff=1E-1 )
def _A (self ):
self._test_attention_slicing_forward_pass(expected_max_diff=1E-2 )
def _A (self ):
self._test_save_load_local()
def _A (self ):
self._test_inference_batch_single_identical(
expected_max_diff=1E-2 , )
| 357
|
from typing import Any

import numpy as np


def is_hermitian(matrix: np.ndarray) -> bool:
    '''Checks whether a matrix equals its own conjugate transpose.'''
    return np.array_equal(matrix, matrix.conjugate().T)
def rayleigh_quotient(a: np.ndarray, v: np.ndarray) -> Any:
    '''Returns the Rayleigh quotient of the Hermitian matrix a and vector v.'''
    v_star = v.conjugate().T
    v_star_dot = v_star.dot(a)
    assert isinstance(v_star_dot, np.ndarray)
    return (v_star_dot.dot(v)) / (v_star.dot(v))
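

# For reference (added for clarity): the Rayleigh quotient of a Hermitian matrix M
# and a nonzero vector v is R(M, v) = (v* M v) / (v* v), where v* is the conjugate
# transpose; for Hermitian M the quotient is always real and lies between the
# smallest and largest eigenvalues of M.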
def tests() -> None:
    a = np.array([[2, 2 + 1j, 4], [2 - 1j, 3, 1j], [4, -1j, 1]])
    v = np.array([[1], [2], [3]])
    assert is_hermitian(a), f'{a} is not hermitian.'
    print(rayleigh_quotient(a, v))

    a = np.array([[1, 2, 4], [2, 3, -1], [4, -1, 1]])
    assert is_hermitian(a), f'{a} is not hermitian.'
    assert rayleigh_quotient(a, v) == float(3)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    tests()
| 304
| 0
|
import math
import unittest
from transformers import BioGptConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
BioGptForCausalLM,
BioGptForSequenceClassification,
BioGptForTokenClassification,
BioGptModel,
BioGptTokenizer,
)
from transformers.models.biogpt.modeling_biogpt import BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST
class A :
def __init__(self , lowerCAmelCase , lowerCAmelCase=1_3 , lowerCAmelCase=7 , lowerCAmelCase=True , lowerCAmelCase=True , lowerCAmelCase=False , lowerCAmelCase=True , lowerCAmelCase=9_9 , lowerCAmelCase=3_2 , lowerCAmelCase=5 , lowerCAmelCase=4 , lowerCAmelCase=3_7 , lowerCAmelCase="gelu" , lowerCAmelCase=0.1 , lowerCAmelCase=0.1 , lowerCAmelCase=5_1_2 , lowerCAmelCase=1_6 , lowerCAmelCase=2 , lowerCAmelCase=0.02 , lowerCAmelCase=3 , lowerCAmelCase=4 , lowerCAmelCase=None , ):
__lowercase= parent
__lowercase= batch_size
__lowercase= seq_length
__lowercase= is_training
__lowercase= use_input_mask
__lowercase= use_token_type_ids
__lowercase= use_labels
__lowercase= vocab_size
__lowercase= hidden_size
__lowercase= num_hidden_layers
__lowercase= num_attention_heads
__lowercase= intermediate_size
__lowercase= hidden_act
__lowercase= hidden_dropout_prob
__lowercase= attention_probs_dropout_prob
__lowercase= max_position_embeddings
__lowercase= type_vocab_size
__lowercase= type_sequence_label_size
__lowercase= initializer_range
__lowercase= num_labels
__lowercase= num_choices
__lowercase= scope
def _A (self ):
__lowercase= ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
__lowercase= None
if self.use_input_mask:
__lowercase= random_attention_mask([self.batch_size, self.seq_length] )
__lowercase= None
if self.use_token_type_ids:
__lowercase= ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
__lowercase= None
__lowercase= None
__lowercase= None
if self.use_labels:
__lowercase= ids_tensor([self.batch_size] , self.type_sequence_label_size )
__lowercase= ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
__lowercase= ids_tensor([self.batch_size] , self.num_choices )
__lowercase= self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def _A (self ):
return BioGptConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=__snake_case , initializer_range=self.initializer_range , )
def _A (self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase ):
__lowercase= BioGptModel(config=__snake_case )
model.to(__snake_case )
model.eval()
__lowercase= model(__snake_case , attention_mask=__snake_case )
__lowercase= model(__snake_case )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def _A (self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , ):
__lowercase= BioGptForCausalLM(config=__snake_case )
model.to(__snake_case )
model.eval()
__lowercase= model(__snake_case , attention_mask=__snake_case , token_type_ids=__snake_case , labels=__snake_case )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def _A (self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , *lowerCAmelCase ):
__lowercase= BioGptModel(config=__snake_case )
model.to(__snake_case )
model.eval()
# create attention mask
__lowercase= torch.ones(input_ids.shape , dtype=torch.long , device=__snake_case )
__lowercase= self.seq_length // 2
__lowercase= 0
# first forward pass
__lowercase= model(__snake_case , attention_mask=__snake_case ).to_tuple()
# create hypothetical next token and extent to next_input_ids
__lowercase= ids_tensor((self.batch_size, 1) , config.vocab_size )
# change a random masked slice from input_ids
__lowercase= ids_tensor((1,) , __snake_case ).item() + 1
__lowercase= ids_tensor((self.batch_size, 1) , config.vocab_size ).squeeze(-1 )
__lowercase= random_other_next_tokens
# append to next input_ids and attn_mask
__lowercase= torch.cat([input_ids, next_tokens] , dim=-1 )
__lowercase= torch.cat(
[attn_mask, torch.ones((attn_mask.shape[0], 1) , dtype=torch.long , device=__snake_case )] , dim=1 , )
# get two different outputs
__lowercase= model(__snake_case , attention_mask=__snake_case )['last_hidden_state']
__lowercase= model(__snake_case , past_key_values=__snake_case , attention_mask=__snake_case )['last_hidden_state']
# select random slice
__lowercase= ids_tensor((1,) , output_from_past.shape[-1] ).item()
__lowercase= output_from_no_past[:, -1, random_slice_idx].detach()
__lowercase= output_from_past[:, 0, random_slice_idx].detach()
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(__snake_case , __snake_case , atol=1E-3 ) )
def _A (self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , *lowerCAmelCase ):
__lowercase= BioGptModel(config=__snake_case ).to(__snake_case ).eval()
__lowercase= torch.ones(input_ids.shape , dtype=torch.long , device=__snake_case )
# first forward pass
__lowercase= model(__snake_case , attention_mask=__snake_case , use_cache=__snake_case )
__lowercase= outputs.to_tuple()
# create hypothetical multiple next token and extent to next_input_ids
__lowercase= ids_tensor((self.batch_size, 3) , config.vocab_size )
__lowercase= ids_tensor((self.batch_size, 3) , 2 )
# append to next input_ids and
__lowercase= torch.cat([input_ids, next_tokens] , dim=-1 )
__lowercase= torch.cat([attention_mask, next_attn_mask] , dim=-1 )
__lowercase= model(__snake_case , attention_mask=__snake_case )['last_hidden_state']
__lowercase= model(__snake_case , attention_mask=__snake_case , past_key_values=__snake_case )[
'last_hidden_state'
]
# select random slice
__lowercase= ids_tensor((1,) , output_from_past.shape[-1] ).item()
__lowercase= output_from_no_past[:, -3:, random_slice_idx].detach()
__lowercase= output_from_past[:, :, random_slice_idx].detach()
self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] )
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(__snake_case , __snake_case , atol=1E-3 ) )
def _A (self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , *lowerCAmelCase , lowerCAmelCase=False ):
__lowercase= BioGptForCausalLM(__snake_case )
model.to(__snake_case )
if gradient_checkpointing:
model.gradient_checkpointing_enable()
__lowercase= model(__snake_case , labels=__snake_case )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
result.loss.backward()
    def create_and_check_biogpt_weight_initialization(self, config, *args):
        model = BioGptModel(config)
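        # Note (added for clarity): this mirrors the GPT-2 style scaled initialization,
        # where residual-projection ("c_proj") weights are drawn with
        # std = initializer_range / sqrt(2 * num_hidden_layers).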
        model_std = model.config.initializer_range / math.sqrt(2 * model.config.num_hidden_layers)
        for key in model.state_dict().keys():
            if "c_proj" in key and "weight" in key:
                self.parent.assertLessEqual(abs(torch.std(model.state_dict()[key]) - model_std), 0.001)
                self.parent.assertLessEqual(abs(torch.mean(model.state_dict()[key]) - 0.0), 0.01)
def _A (self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , *lowerCAmelCase ):
__lowercase= self.num_labels
__lowercase= BioGptForTokenClassification(__snake_case )
model.to(__snake_case )
model.eval()
__lowercase= model(__snake_case , attention_mask=__snake_case , token_type_ids=__snake_case )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {'input_ids': input_ids, 'attention_mask': input_mask}
        return config, inputs_dict
@require_torch
class A ( ModelTesterMixin , GenerationTesterMixin , PipelineTesterMixin , unittest.TestCase ):
UpperCamelCase_ : Tuple =(
(BioGptModel, BioGptForCausalLM, BioGptForSequenceClassification, BioGptForTokenClassification)
if is_torch_available()
else ()
)
UpperCamelCase_ : Dict =(BioGptForCausalLM,) if is_torch_available() else ()
UpperCamelCase_ : Tuple =(
{
'''feature-extraction''': BioGptModel,
'''text-classification''': BioGptForSequenceClassification,
'''text-generation''': BioGptForCausalLM,
'''token-classification''': BioGptForTokenClassification,
'''zero-shot''': BioGptForSequenceClassification,
}
if is_torch_available()
else {}
)
UpperCamelCase_ : Optional[int] =False
def _A (self ):
__lowercase= BioGptModelTester(self )
__lowercase= ConfigTester(self , config_class=__snake_case , hidden_size=3_7 )
def _A (self ):
self.config_tester.run_common_tests()
def _A (self ):
__lowercase= self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__snake_case )
def _A (self ):
__lowercase= self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
__lowercase= type
self.model_tester.create_and_check_model(*__snake_case )
def _A (self ):
__lowercase= self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_biogpt_model_attention_mask_past(*__snake_case )
def _A (self ):
__lowercase= self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_forward_and_backwards(*__snake_case , gradient_checkpointing=__snake_case )
def _A (self ):
__lowercase= self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_biogpt_model_past_large_inputs(*__snake_case )
def _A (self ):
__lowercase= self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_biogpt_weight_initialization(*__snake_case )
def _A (self ):
__lowercase= self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_biogpt_for_token_classification(*__snake_case )
@slow
def _A (self ):
__lowercase= BioGptForCausalLM.from_pretrained('microsoft/biogpt' )
model.to(__snake_case )
__lowercase= BioGptTokenizer.from_pretrained('microsoft/biogpt' )
__lowercase= 'left'
# Define PAD Token = EOS Token = 50256
__lowercase= tokenizer.eos_token
__lowercase= model.config.eos_token_id
# use different length sentences to test batching
__lowercase= [
'Hello, my dog is a little',
'Today, I',
]
__lowercase= tokenizer(__snake_case , return_tensors='pt' , padding=__snake_case )
__lowercase= inputs['input_ids'].to(__snake_case )
__lowercase= model.generate(
input_ids=__snake_case , attention_mask=inputs['attention_mask'].to(__snake_case ) , )
__lowercase= tokenizer(sentences[0] , return_tensors='pt' ).input_ids.to(__snake_case )
__lowercase= model.generate(input_ids=__snake_case )
__lowercase= inputs_non_padded.shape[-1] - inputs['attention_mask'][-1].long().sum().cpu().item()
__lowercase= tokenizer(sentences[1] , return_tensors='pt' ).input_ids.to(__snake_case )
__lowercase= model.generate(input_ids=__snake_case , max_length=model.config.max_length - num_paddings )
__lowercase= tokenizer.batch_decode(__snake_case , skip_special_tokens=__snake_case )
__lowercase= tokenizer.decode(output_non_padded[0] , skip_special_tokens=__snake_case )
__lowercase= tokenizer.decode(output_padded[0] , skip_special_tokens=__snake_case )
__lowercase= [
'Hello, my dog is a little bit bigger than a little bit.',
'Today, I have a good idea of how to use the information',
]
self.assertListEqual(__snake_case , __snake_case )
self.assertListEqual(__snake_case , [non_padded_sentence, padded_sentence] )
@slow
def _A (self ):
for model_name in BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__lowercase= BioGptModel.from_pretrained(__snake_case )
self.assertIsNotNone(__snake_case )
def _A (self ):
__lowercase= self.model_tester.prepare_config_and_inputs_for_common()
__lowercase= 3
__lowercase= input_dict['input_ids']
__lowercase= input_ids.ne(1 ).to(__snake_case )
__lowercase= ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size )
__lowercase= BioGptForSequenceClassification(__snake_case )
model.to(__snake_case )
model.eval()
__lowercase= model(__snake_case , attention_mask=__snake_case , labels=__snake_case )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
def _A (self ):
__lowercase= self.model_tester.prepare_config_and_inputs_for_common()
__lowercase= 3
__lowercase= 'multi_label_classification'
__lowercase= input_dict['input_ids']
__lowercase= input_ids.ne(1 ).to(__snake_case )
__lowercase= ids_tensor(
[self.model_tester.batch_size, config.num_labels] , self.model_tester.type_sequence_label_size ).to(torch.float )
__lowercase= BioGptForSequenceClassification(__snake_case )
model.to(__snake_case )
model.eval()
__lowercase= model(__snake_case , attention_mask=__snake_case , labels=__snake_case )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
@require_torch
class A ( unittest.TestCase ):
@slow
def _A (self ):
__lowercase= BioGptForCausalLM.from_pretrained('microsoft/biogpt' )
__lowercase= torch.tensor([[2, 4_8_0_5, 9, 6_5_6, 2_1]] )
__lowercase= model(__snake_case )[0]
__lowercase= 4_2_3_8_4
__lowercase= torch.Size((1, 5, vocab_size) )
self.assertEqual(output.shape , __snake_case )
__lowercase= torch.tensor(
[[[-9.52_36, -9.89_18, 10.45_57], [-11.04_69, -9.64_23, 8.10_22], [-8.86_64, -7.88_26, 5.53_25]]] )
self.assertTrue(torch.allclose(output[:, :3, :3] , __snake_case , atol=1E-4 ) )
@slow
def _A (self ):
__lowercase= BioGptTokenizer.from_pretrained('microsoft/biogpt' )
__lowercase= BioGptForCausalLM.from_pretrained('microsoft/biogpt' )
model.to(__snake_case )
torch.manual_seed(0 )
__lowercase= tokenizer('COVID-19 is' , return_tensors='pt' ).to(__snake_case )
__lowercase= model.generate(
**__snake_case , min_length=1_0_0 , max_length=1_0_2_4 , num_beams=5 , early_stopping=__snake_case , )
__lowercase= tokenizer.decode(output_ids[0] , skip_special_tokens=__snake_case )
__lowercase= (
'COVID-19 is a global pandemic caused by severe acute respiratory syndrome coronavirus 2 (SARS-CoV-2), the'
' causative agent of coronavirus disease 2019 (COVID-19), which has spread to more than 200 countries and'
' territories, including the United States (US), Canada, Australia, New Zealand, the United Kingdom (UK),'
' and the United States of America (USA), as of March 11, 2020, with more than 800,000 confirmed cases and'
' more than 800,000 deaths.'
)
        self.assertEqual(output_str, expected_output_str)
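# The integration tests above are gated behind @slow; a hypothetical invocation
# (following the transformers convention of enabling slow tests via an env var):
#   RUN_SLOW=1 python -m pytest tests/models/biogpt -k "IntegrationTest"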
| 358
|
from math import ceil
from typing import List, Optional, Union
import numpy as np
from ...audio_utils import mel_filter_bank, spectrogram, window_function
from ...feature_extraction_sequence_utils import BatchFeature, SequenceFeatureExtractor
from ...utils import TensorType, logging
logger = logging.get_logger(__name__)


class TvltFeatureExtractor(SequenceFeatureExtractor):
    model_input_names = ["audio_values", "audio_mask"]

    def __init__(
        self,
        spectrogram_length=2048,
        num_channels=1,
        patch_size=[16, 16],
        feature_size=128,
        sampling_rate=44100,
        hop_length_to_sampling_rate=86,
        n_fft=2048,
        padding_value=0.0,
        **kwargs,
    ):
        super().__init__(
            feature_size=feature_size, sampling_rate=sampling_rate, padding_value=padding_value, **kwargs
        )
        self.spectrogram_length = spectrogram_length
        self.num_channels = num_channels
        self.patch_size = patch_size
        self.freq_len = feature_size // self.patch_size[1]
        self.n_fft = n_fft
        self.hop_length = sampling_rate // hop_length_to_sampling_rate
        self.sampling_rate = sampling_rate
        self.padding_value = padding_value
        self.mel_filters = mel_filter_bank(
            num_frequency_bins=1 + n_fft // 2,
            num_mel_filters=feature_size,
            min_frequency=0.0,
            max_frequency=22050.0,
            sampling_rate=sampling_rate,
            norm="slaney",
            mel_scale="slaney",
        ).T

    def _np_extract_fbank_features(self, waveform):
        log_spec = spectrogram(
            waveform,
            window_function(self.n_fft, "hann"),
            frame_length=self.n_fft,
            hop_length=self.hop_length,
            power=2.0,
            mel_filters=self.mel_filters.T,
            log_mel="dB",
            db_range=80.0,
        )
        log_spec = log_spec[:, :-1]
        log_spec = log_spec - 20.0
        log_spec = np.clip(log_spec / 40.0, -2.0, 0.0) + 1.0
        return log_spec

    def __call__(
        self,
        raw_speech,
        return_tensors=None,
        return_attention_mask=True,
        sampling_rate=None,
        resample=False,
        mask_audio=False,
        **kwargs,
    ):
        if sampling_rate is not None:
            if sampling_rate != self.sampling_rate:
                raise ValueError(
                    "This feature extractor is set to support sampling rate"
                    f" of {self.sampling_rate}. Please make sure that the provided `raw_speech` input was sampled"
                    f" with {self.sampling_rate} and not {sampling_rate}."
                )
        else:
            logger.warning(
                "It is strongly recommended to pass the `sampling_rate` argument to this function. "
                "Failing to do so can result in silent errors that might be hard to debug."
            )
        is_batched_numpy = isinstance(raw_speech, np.ndarray) and len(raw_speech.shape) > 1
        if is_batched_numpy and len(raw_speech.shape) > 2:
            raise ValueError(f"Only mono-channel audio is supported for input to {self}")
        is_batched = is_batched_numpy or (
            isinstance(raw_speech, (list, tuple)) and (isinstance(raw_speech[0], (np.ndarray, tuple, list)))
        )
        if is_batched:
            raw_speech = [np.asarray([speech], dtype=np.float32).T for speech in raw_speech]
        elif not is_batched and not isinstance(raw_speech, np.ndarray):
            raw_speech = np.asarray(raw_speech, dtype=np.float32)
        elif isinstance(raw_speech, np.ndarray) and raw_speech.dtype is np.dtype(np.float64):
            raw_speech = raw_speech.astype(np.float32)
        # always return batch
        if not is_batched:
            raw_speech = [np.asarray([raw_speech]).T]
        # Convert audio signals to log mel spectrograms, truncate by time axis
        audio_features = [
            self._np_extract_fbank_features(waveform.squeeze()).T[: self.spectrogram_length] for waveform in raw_speech
        ]
        if isinstance(audio_features[0], list):
            audio_features = [np.asarray(feature, dtype=np.float32) for feature in audio_features]
        # Create audio attention mask
        max_patch_len = max(
            [ceil(feature.shape[0] / self.patch_size[0]) * self.freq_len for feature in audio_features]
        )  # The maximum number of audio patches in a batch
        if return_attention_mask:
            audio_mask = [
                (ceil(feature.shape[0] / self.patch_size[0]) * self.freq_len) * [1]
                + (max_patch_len - ceil(feature.shape[0] / self.patch_size[0]) * self.freq_len) * [0]
                for feature in audio_features
            ]
            audio_mask = np.array(audio_mask).astype(np.float32)
        # convert into correct format for padding
        max_time_len = max_patch_len // self.freq_len * self.patch_size[0]  # The maximum audio size in a batch
        padded_audio_features = np.ones([len(audio_features), 1, max_time_len, self.feature_size]).astype(np.float32)
        padded_audio_features = padded_audio_features * self.padding_value
        for i in range(len(audio_features)):
            feature = audio_features[i]
            padded_audio_features[i, :, : feature.shape[0], :] = feature
        # return as BatchFeature
        if return_attention_mask:
            data = {"audio_values": padded_audio_features, "audio_mask": audio_mask}
        else:
            data = {"audio_values": padded_audio_features}
        encoded_inputs = BatchFeature(data=data, tensor_type=return_tensors)
        return encoded_inputs
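# Minimal usage sketch (hypothetical input: one second of silence at the 44.1 kHz
# default; names above restored from the references inside the methods):
#   extractor = TvltFeatureExtractor()
#   batch = extractor(np.zeros(44100, dtype=np.float32), sampling_rate=44100, return_tensors="np")
#   batch["audio_values"].shape  # (1, 1, padded_time_len, 128)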
| 304
| 0
|
import numpy as np
from cv2 import COLOR_BGR2GRAY, cvtColor, imread
from numpy import array, uint8
from PIL import Image
from digital_image_processing import change_contrast as cc
from digital_image_processing import convert_to_negative as cn
from digital_image_processing import sepia as sp
from digital_image_processing.dithering import burkes as bs
from digital_image_processing.edge_detection import canny
from digital_image_processing.filters import convolve as conv
from digital_image_processing.filters import gaussian_filter as gg
from digital_image_processing.filters import local_binary_pattern as lbp
from digital_image_processing.filters import median_filter as med
from digital_image_processing.filters import sobel_filter as sob
from digital_image_processing.resize import resize as rs
img = imread(r"digital_image_processing/image_data/lena_small.jpg")
gray = cvtColor(img, COLOR_BGR2GRAY)


def test_convert_to_negative() -> None:
    negative_img = cn.convert_to_negative(img)
    # assert negative_img array for at least one True
    assert negative_img.any()


def test_change_contrast() -> None:
    with Image.open("digital_image_processing/image_data/lena_small.jpg") as img:
        # Work around assertion for response
        assert str(cc.change_contrast(img, 110)).startswith(
            "<PIL.Image.Image image mode=RGB size=100x100 at"
        )


def test_gen_gaussian_kernel() -> None:
    resp = canny.gen_gaussian_kernel(9, sigma=1.4)
    # Assert ambiguous array
    assert resp.all()


def test_canny() -> None:
    canny_img = imread("digital_image_processing/image_data/lena_small.jpg", 0)
    # assert ambiguous array for all == True
    assert canny_img.all()
    canny_array = canny.canny(canny_img)
    # assert canny array for at least one True
    assert canny_array.any()


def test_gen_gaussian_kernel_filter() -> None:
    assert gg.gaussian_filter(gray, 5, sigma=0.9).all()


def test_convolve_filter() -> None:
    laplace = array([[0.25, 0.5, 0.25], [0.5, -3, 0.5], [0.25, 0.5, 0.25]])
    res = conv.img_convolve(gray, laplace).astype(uint8)
    assert res.any()


def test_median_filter() -> None:
    assert med.median_filter(gray, 3).any()


def test_sobel_filter() -> None:
    grad, theta = sob.sobel_filter(gray)
    assert grad.any() and theta.any()


def test_sepia() -> None:
    sepia = sp.make_sepia(img, 20)
    assert sepia.all()


def test_burkes(file_path: str = "digital_image_processing/image_data/lena_small.jpg") -> None:
    burkes = bs.Burkes(imread(file_path, 1), 120)
    burkes.process()
    assert burkes.output_img.any()


def test_nearest_neighbour(file_path: str = "digital_image_processing/image_data/lena_small.jpg") -> None:
    nn = rs.NearestNeighbour(imread(file_path, 1), 400, 200)
    nn.process()
    assert nn.output.any()


def test_local_binary_pattern() -> None:
    file_path = "digital_image_processing/image_data/lena.jpg"
    # Reading the image and converting it to grayscale.
    image = imread(file_path, 0)
    # Test for get_neighbors_pixel function() return not None
    x_coordinate = 0
    y_coordinate = 0
    center = image[x_coordinate][y_coordinate]
    neighbors_pixels = lbp.get_neighbors_pixel(image, x_coordinate, y_coordinate, center)
    assert neighbors_pixels is not None
    # Test for local_binary_pattern function()
    # Create a numpy array as the same height and width of read image
    lbp_image = np.zeros((image.shape[0], image.shape[1]))
    # Iterating through the image and calculating the local binary pattern value
    # for each pixel.
    for i in range(0, image.shape[0]):
        for j in range(0, image.shape[1]):
            lbp_image[i][j] = lbp.local_binary_value(image, i, j)
    assert lbp_image.any()
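# These are plain pytest-style test functions; a hypothetical invocation from the
# repository root:
#   python -m pytest digital_image_processing/test_digital_image_processing.py -q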
| 359
|
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from accelerate import PartialState
from accelerate.utils.operations import broadcast, gather, gather_object, pad_across_processes, reduce
def create_tensor(state):
    return (torch.arange(state.num_processes) + 1.0 + (state.num_processes * state.process_index)).to(state.device)


def test_gather(state):
    tensor = create_tensor(state)
    gathered_tensor = gather(tensor)
    assert gathered_tensor.tolist() == list(range(1, state.num_processes**2 + 1))


def test_gather_object(state):
    obj = [state.process_index]
    gathered_obj = gather_object(obj)
    assert len(gathered_obj) == state.num_processes, f"{gathered_obj}, {len(gathered_obj)} != {state.num_processes}"
    assert gathered_obj == list(range(state.num_processes)), f"{gathered_obj} != {list(range(state.num_processes))}"


def test_broadcast(state):
    tensor = create_tensor(state)
    broadcasted_tensor = broadcast(tensor)
    assert broadcasted_tensor.shape == torch.Size([state.num_processes])
    assert broadcasted_tensor.tolist() == list(range(1, state.num_processes + 1))


def test_pad_across_processes(state):
    # The main process pads with one extra element so the other ranks must be padded.
    if state.is_main_process:
        tensor = torch.arange(state.num_processes + 1).to(state.device)
    else:
        tensor = torch.arange(state.num_processes).to(state.device)
    padded_tensor = pad_across_processes(tensor)
    assert padded_tensor.shape == torch.Size([state.num_processes + 1])
    if not state.is_main_process:
        assert padded_tensor.tolist() == list(range(0, state.num_processes)) + [0]


def test_reduce_sum(state):
    # For now runs on only two processes
    if state.num_processes != 2:
        return
    tensor = create_tensor(state)
    reduced_tensor = reduce(tensor, "sum")
    truth_tensor = torch.tensor([4.0, 6]).to(state.device)
    assert torch.allclose(reduced_tensor, truth_tensor), f"{reduced_tensor} != {truth_tensor}"


def test_reduce_mean(state):
    # For now runs on only two processes
    if state.num_processes != 2:
        return
    tensor = create_tensor(state)
    reduced_tensor = reduce(tensor, "mean")
    truth_tensor = torch.tensor([2.0, 3]).to(state.device)
    assert torch.allclose(reduced_tensor, truth_tensor), f"{reduced_tensor} != {truth_tensor}"


def _mp_fn(index):
    # For xla_spawn (TPUs)
    main()


def main():
    state = PartialState()
    state.print(f"State: {state}")
    state.print("testing gather")
    test_gather(state)
    state.print("testing gather_object")
    test_gather_object(state)
    state.print("testing broadcast")
    test_broadcast(state)
    state.print("testing pad_across_processes")
    test_pad_across_processes(state)
    state.print("testing reduce_sum")
    test_reduce_sum(state)
    state.print("testing reduce_mean")
    test_reduce_mean(state)


if __name__ == "__main__":
    main()
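# The script is meant to run once per process so the collective ops have peers;
# a hypothetical two-process launch:
#   accelerate launch --num_processes 2 test_ops.py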
| 304
| 0
|
import functools
def min_distance_up_bottom(word1: str, word2: str) -> int:
    """
    Minimum number of edits (insert, delete, replace) to turn `word1` into
    `word2`, computed top-down with memoization.
    """
    len_word1 = len(word1)
    len_word2 = len(word2)

    @functools.cache
    def min_distance(index1: int, index2: int) -> int:
        # if first word index is overflow - delete all from the second word
        if index1 >= len_word1:
            return len_word2 - index2
        # if second word index is overflow - delete all from the first word
        if index2 >= len_word2:
            return len_word1 - index1
        diff = int(word1[index1] != word2[index2])  # current letters not identical
        return min(
            1 + min_distance(index1 + 1, index2),
            1 + min_distance(index1, index2 + 1),
            diff + min_distance(index1 + 1, index2 + 1),
        )

    return min_distance(0, 0)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
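    # Quick worked example (not part of the doctests): turning "intention"
    # into "execution" needs 5 edits.
    print(min_distance_up_bottom("intention", "execution"))  # 5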
| 360
|
# DISCLAIMER: This file is strongly influenced by https://github.com/yang-song/score_sde_pytorch
import math
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, randn_tensor
from .scheduling_utils import SchedulerMixin, SchedulerOutput
@dataclass
class SdeVeOutput(BaseOutput):
    prev_sample: torch.FloatTensor
    prev_sample_mean: torch.FloatTensor


class ScoreSdeVeScheduler(SchedulerMixin, ConfigMixin):
    order = 1

    @register_to_config
    def __init__(
        self,
        num_train_timesteps: int = 2000,
        snr: float = 0.15,
        sigma_min: float = 0.01,
        sigma_max: float = 1348.0,
        sampling_eps: float = 1e-5,
        correct_steps: int = 1,
    ):
        # standard deviation of the initial noise distribution
        self.init_noise_sigma = sigma_max
        # setable values
        self.timesteps = None
        self.set_sigmas(num_train_timesteps, sigma_min, sigma_max, sampling_eps)

    def scale_model_input(self, sample: torch.FloatTensor, timestep: Optional[int] = None) -> torch.FloatTensor:
        return sample

    def set_timesteps(self, num_inference_steps: int, sampling_eps: float = None, device=None):
        sampling_eps = sampling_eps if sampling_eps is not None else self.config.sampling_eps
        self.timesteps = torch.linspace(1, sampling_eps, num_inference_steps, device=device)

    def set_sigmas(self, num_inference_steps: int, sigma_min: float = None, sigma_max: float = None, sampling_eps: float = None):
        sigma_min = sigma_min if sigma_min is not None else self.config.sigma_min
        sigma_max = sigma_max if sigma_max is not None else self.config.sigma_max
        sampling_eps = sampling_eps if sampling_eps is not None else self.config.sampling_eps
        if self.timesteps is None:
            self.set_timesteps(num_inference_steps, sampling_eps)
        self.sigmas = sigma_min * (sigma_max / sigma_min) ** (self.timesteps / sampling_eps)
        self.discrete_sigmas = torch.exp(torch.linspace(math.log(sigma_min), math.log(sigma_max), num_inference_steps))
        self.sigmas = torch.tensor([sigma_min * (sigma_max / sigma_min) ** t for t in self.timesteps])

    def get_adjacent_sigma(self, timesteps, t):
        return torch.where(
            timesteps == 0,
            torch.zeros_like(t.to(timesteps.device)),
            self.discrete_sigmas[timesteps - 1].to(timesteps.device),
        )

    def step_pred(
        self,
        model_output: torch.FloatTensor,
        timestep: int,
        sample: torch.FloatTensor,
        generator: Optional[torch.Generator] = None,
        return_dict: bool = True,
    ) -> Union[SdeVeOutput, Tuple]:
        if self.timesteps is None:
            raise ValueError(
                "`self.timesteps` is not set, you need to run 'set_timesteps' after creating the scheduler"
            )
        timestep = timestep * torch.ones(
            sample.shape[0], device=sample.device
        )  # torch.repeat_interleave(timestep, sample.shape[0])
        timesteps = (timestep * (len(self.timesteps) - 1)).long()
        # mps requires indices to be in the same device, so we use cpu as is the default with cuda
        timesteps = timesteps.to(self.discrete_sigmas.device)
        sigma = self.discrete_sigmas[timesteps].to(sample.device)
        adjacent_sigma = self.get_adjacent_sigma(timesteps, timestep).to(sample.device)
        drift = torch.zeros_like(sample)
        diffusion = (sigma**2 - adjacent_sigma**2) ** 0.5
        # equation 6 in the paper: the model_output modeled by the network is grad_x log pt(x)
        # also equation 47 shows the analog from SDE models to ancestral sampling methods
        diffusion = diffusion.flatten()
        while len(diffusion.shape) < len(sample.shape):
            diffusion = diffusion.unsqueeze(-1)
        drift = drift - diffusion**2 * model_output
        # equation 6: sample noise for the diffusion term of
        noise = randn_tensor(
            sample.shape, layout=sample.layout, generator=generator, device=sample.device, dtype=sample.dtype
        )
        prev_sample_mean = sample - drift  # subtract because `dt` is a small negative timestep
        # TODO is the variable diffusion the correct scaling term for the noise?
        prev_sample = prev_sample_mean + diffusion * noise  # add impact of diffusion field g
        if not return_dict:
            return (prev_sample, prev_sample_mean)
        return SdeVeOutput(prev_sample=prev_sample, prev_sample_mean=prev_sample_mean)

    def step_correct(
        self,
        model_output: torch.FloatTensor,
        sample: torch.FloatTensor,
        generator: Optional[torch.Generator] = None,
        return_dict: bool = True,
    ) -> Union[SchedulerOutput, Tuple]:
        if self.timesteps is None:
            raise ValueError(
                "`self.timesteps` is not set, you need to run 'set_timesteps' after creating the scheduler"
            )
        # For small batch sizes, the paper "suggest replacing norm(z) with sqrt(d), where d is the dim. of z"
        # sample noise for correction
        noise = randn_tensor(sample.shape, layout=sample.layout, generator=generator).to(sample.device)
        # compute step size from the model_output, the noise, and the snr
        grad_norm = torch.norm(model_output.reshape(model_output.shape[0], -1), dim=-1).mean()
        noise_norm = torch.norm(noise.reshape(noise.shape[0], -1), dim=-1).mean()
        step_size = (self.config.snr * noise_norm / grad_norm) ** 2 * 2
        step_size = step_size * torch.ones(sample.shape[0]).to(sample.device)
        # self.repeat_scalar(step_size, sample.shape[0])
        # compute corrected sample: model_output term and noise term
        step_size = step_size.flatten()
        while len(step_size.shape) < len(sample.shape):
            step_size = step_size.unsqueeze(-1)
        prev_sample_mean = sample + step_size * model_output
        prev_sample = prev_sample_mean + ((step_size * 2) ** 0.5) * noise
        if not return_dict:
            return (prev_sample,)
        return SchedulerOutput(prev_sample=prev_sample)

    def add_noise(
        self,
        original_samples: torch.FloatTensor,
        noise: torch.FloatTensor,
        timesteps: torch.FloatTensor,
    ) -> torch.FloatTensor:
        # Make sure sigmas and timesteps have the same device and dtype as original_samples
        timesteps = timesteps.to(original_samples.device)
        sigmas = self.discrete_sigmas.to(original_samples.device)[timesteps]
        noise = (
            noise * sigmas[:, None, None, None]
            if noise is not None
            else torch.randn_like(original_samples) * sigmas[:, None, None, None]
        )
        noisy_samples = noise + original_samples
        return noisy_samples

    def __len__(self):
        return self.config.num_train_timesteps
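# A minimal sketch of the intended predictor-corrector sampling loop (hypothetical
# `model` and `sample`; see diffusers' ScoreSdeVePipeline for the real wiring):
#   scheduler = ScoreSdeVeScheduler()
#   scheduler.set_timesteps(num_inference_steps=2000)
#   scheduler.set_sigmas(num_inference_steps=2000)
#   for t in scheduler.timesteps:
#       for _ in range(scheduler.config.correct_steps):
#           sample = scheduler.step_correct(model(sample, t).sample, sample).prev_sample
#       sample = scheduler.step_pred(model(sample, t).sample, t, sample).prev_sample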
| 304
| 0
|
import unittest
from transformers import (
MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
Pipeline,
ZeroShotClassificationPipeline,
pipeline,
)
from transformers.testing_utils import is_pipeline_test, nested_simplify, require_tf, require_torch, slow
from .test_pipelines_common import ANY
# These 2 model types require different inputs than those of the usual text models.
lowerCAmelCase = {"LayoutLMv2Config", "LayoutLMv3Config"}
@is_pipeline_test
class A ( unittest.TestCase ):
UpperCamelCase_ : Union[str, Any] =MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
UpperCamelCase_ : List[str] =TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
if model_mapping is not None:
UpperCamelCase_ : Dict ={config: model for config, model in model_mapping.items() if config.__name__ not in _TO_SKIP}
if tf_model_mapping is not None:
UpperCamelCase_ : Dict ={
config: model for config, model in tf_model_mapping.items() if config.__name__ not in _TO_SKIP
}
def _A (self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase ):
__lowercase= ZeroShotClassificationPipeline(
model=__A , tokenizer=__A , candidate_labels=['polics', 'health'] )
return classifier, ["Who are you voting for in 2020?", "My stomach hurts."]
    def run_pipeline_test(self, classifier, _):
        outputs = classifier("Who are you voting for in 2020?", candidate_labels="politics")
        self.assertEqual(outputs, {"sequence": ANY(str), "labels": [ANY(str)], "scores": [ANY(float)]})
        # No kwarg
        outputs = classifier("Who are you voting for in 2020?", ["politics"])
        self.assertEqual(outputs, {"sequence": ANY(str), "labels": [ANY(str)], "scores": [ANY(float)]})
        outputs = classifier("Who are you voting for in 2020?", candidate_labels=["politics"])
        self.assertEqual(outputs, {"sequence": ANY(str), "labels": [ANY(str)], "scores": [ANY(float)]})
        outputs = classifier("Who are you voting for in 2020?", candidate_labels="politics, public health")
        self.assertEqual(
            outputs, {"sequence": ANY(str), "labels": [ANY(str), ANY(str)], "scores": [ANY(float), ANY(float)]}
        )
        self.assertAlmostEqual(sum(nested_simplify(outputs["scores"])), 1.0)
        outputs = classifier("Who are you voting for in 2020?", candidate_labels=["politics", "public health"])
        self.assertEqual(
            outputs, {"sequence": ANY(str), "labels": [ANY(str), ANY(str)], "scores": [ANY(float), ANY(float)]}
        )
        self.assertAlmostEqual(sum(nested_simplify(outputs["scores"])), 1.0)
        outputs = classifier(
            "Who are you voting for in 2020?", candidate_labels="politics", hypothesis_template="This text is about {}"
        )
        self.assertEqual(outputs, {"sequence": ANY(str), "labels": [ANY(str)], "scores": [ANY(float)]})
        # https://github.com/huggingface/transformers/issues/13846
        outputs = classifier(["I am happy"], ["positive", "negative"])
        self.assertEqual(
            outputs,
            [
                {"sequence": ANY(str), "labels": [ANY(str), ANY(str)], "scores": [ANY(float), ANY(float)]}
                for i in range(1)
            ],
        )
        outputs = classifier(["I am happy", "I am sad"], ["positive", "negative"])
        self.assertEqual(
            outputs,
            [
                {"sequence": ANY(str), "labels": [ANY(str), ANY(str)], "scores": [ANY(float), ANY(float)]}
                for i in range(2)
            ],
        )
        with self.assertRaises(ValueError):
            classifier("", candidate_labels="politics")
        with self.assertRaises(TypeError):
            classifier(None, candidate_labels="politics")
        with self.assertRaises(ValueError):
            classifier("Who are you voting for in 2020?", candidate_labels="")
        with self.assertRaises(TypeError):
            classifier("Who are you voting for in 2020?", candidate_labels=None)
        with self.assertRaises(ValueError):
            classifier(
                "Who are you voting for in 2020?", candidate_labels="politics", hypothesis_template="Not formatting template"
            )
        with self.assertRaises(AttributeError):
            classifier(
                "Who are you voting for in 2020?", candidate_labels="politics", hypothesis_template=None
            )
        self.run_entailment_id(classifier)
    def run_entailment_id(self, zero_shot_classifier):
        config = zero_shot_classifier.model.config
        original_label2id = config.label2id
        original_entailment = zero_shot_classifier.entailment_id
        config.label2id = {"LABEL_0": 0, "LABEL_1": 1, "LABEL_2": 2}
        self.assertEqual(zero_shot_classifier.entailment_id, -1)
        config.label2id = {"entailment": 0, "neutral": 1, "contradiction": 2}
        self.assertEqual(zero_shot_classifier.entailment_id, 0)
        config.label2id = {"ENTAIL": 0, "NON-ENTAIL": 1}
        self.assertEqual(zero_shot_classifier.entailment_id, 0)
        config.label2id = {"ENTAIL": 2, "NEUTRAL": 1, "CONTR": 0}
        self.assertEqual(zero_shot_classifier.entailment_id, 2)
        config.label2id = original_label2id
        self.assertEqual(original_entailment, zero_shot_classifier.entailment_id)
    @require_torch
    def test_truncation(self):
        zero_shot_classifier = pipeline(
            "zero-shot-classification", model="sshleifer/tiny-distilbert-base-cased-distilled-squad", framework="pt"
        )
        # There was a regression in 4.10 for this
        # Adding a test so we don't make the mistake again.
        # https://github.com/huggingface/transformers/issues/13381#issuecomment-912343499
        zero_shot_classifier(
            "Who are you voting for in 2020?" * 100, candidate_labels=["politics", "public health", "science"]
        )
    @require_torch
    def test_small_model_pt(self):
        zero_shot_classifier = pipeline(
            "zero-shot-classification", model="sshleifer/tiny-distilbert-base-cased-distilled-squad", framework="pt"
        )
        outputs = zero_shot_classifier(
            "Who are you voting for in 2020?", candidate_labels=["politics", "public health", "science"]
        )
        self.assertEqual(
            nested_simplify(outputs) , {
'sequence': 'Who are you voting for in 2020?',
'labels': ['science', 'public health', 'politics'],
'scores': [0.3_33, 0.3_33, 0.3_33],
} , )
    @require_tf
    def test_small_model_tf(self):
        zero_shot_classifier = pipeline(
            "zero-shot-classification", model="sshleifer/tiny-distilbert-base-cased-distilled-squad", framework="tf"
        )
        outputs = zero_shot_classifier(
            "Who are you voting for in 2020?", candidate_labels=["politics", "public health", "science"]
        )
        self.assertEqual(
            nested_simplify(outputs) , {
'sequence': 'Who are you voting for in 2020?',
'labels': ['science', 'public health', 'politics'],
'scores': [0.3_33, 0.3_33, 0.3_33],
} , )
    @slow
    @require_torch
    def test_large_model_pt(self):
        zero_shot_classifier = pipeline("zero-shot-classification", model="roberta-large-mnli", framework="pt")
        outputs = zero_shot_classifier(
            "Who are you voting for in 2020?", candidate_labels=["politics", "public health", "science"]
        )
        self.assertEqual(
            nested_simplify(outputs) , {
'sequence': 'Who are you voting for in 2020?',
'labels': ['politics', 'public health', 'science'],
'scores': [0.9_76, 0.0_15, 0.0_09],
} , )
        outputs = zero_shot_classifier(
'The dominant sequence transduction models are based on complex recurrent or convolutional neural networks'
' in an encoder-decoder configuration. The best performing models also connect the encoder and decoder'
' through an attention mechanism. We propose a new simple network architecture, the Transformer, based'
' solely on attention mechanisms, dispensing with recurrence and convolutions entirely. Experiments on two'
' machine translation tasks show these models to be superior in quality while being more parallelizable'
' and requiring significantly less time to train. Our model achieves 28.4 BLEU on the WMT 2014'
' English-to-German translation task, improving over the existing best results, including ensembles by'
' over 2 BLEU. On the WMT 2014 English-to-French translation task, our model establishes a new'
' single-model state-of-the-art BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small'
' fraction of the training costs of the best models from the literature. We show that the Transformer'
' generalizes well to other tasks by applying it successfully to English constituency parsing both with'
            ' large and limited training data.' , candidate_labels=['machine learning', 'statistics', 'translation', 'vision'] , multi_label=True , )
        self.assertEqual(
            nested_simplify(outputs) , {
'sequence': (
'The dominant sequence transduction models are based on complex recurrent or convolutional neural'
' networks in an encoder-decoder configuration. The best performing models also connect the'
' encoder and decoder through an attention mechanism. We propose a new simple network'
' architecture, the Transformer, based solely on attention mechanisms, dispensing with recurrence'
' and convolutions entirely. Experiments on two machine translation tasks show these models to be'
' superior in quality while being more parallelizable and requiring significantly less time to'
' train. Our model achieves 28.4 BLEU on the WMT 2014 English-to-German translation task,'
' improving over the existing best results, including ensembles by over 2 BLEU. On the WMT 2014'
' English-to-French translation task, our model establishes a new single-model state-of-the-art'
' BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small fraction of the training'
' costs of the best models from the literature. We show that the Transformer generalizes well to'
' other tasks by applying it successfully to English constituency parsing both with large and'
' limited training data.'
),
'labels': ['translation', 'machine learning', 'vision', 'statistics'],
'scores': [0.8_17, 0.7_13, 0.0_18, 0.0_18],
} , )
    @slow
    @require_tf
    def test_large_model_tf(self):
        zero_shot_classifier = pipeline("zero-shot-classification", model="roberta-large-mnli", framework="tf")
        outputs = zero_shot_classifier(
            "Who are you voting for in 2020?", candidate_labels=["politics", "public health", "science"]
        )
        self.assertEqual(
            nested_simplify(outputs) , {
'sequence': 'Who are you voting for in 2020?',
'labels': ['politics', 'public health', 'science'],
'scores': [0.9_76, 0.0_15, 0.0_09],
} , )
        outputs = zero_shot_classifier(
'The dominant sequence transduction models are based on complex recurrent or convolutional neural networks'
' in an encoder-decoder configuration. The best performing models also connect the encoder and decoder'
' through an attention mechanism. We propose a new simple network architecture, the Transformer, based'
' solely on attention mechanisms, dispensing with recurrence and convolutions entirely. Experiments on two'
' machine translation tasks show these models to be superior in quality while being more parallelizable'
' and requiring significantly less time to train. Our model achieves 28.4 BLEU on the WMT 2014'
' English-to-German translation task, improving over the existing best results, including ensembles by'
' over 2 BLEU. On the WMT 2014 English-to-French translation task, our model establishes a new'
' single-model state-of-the-art BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small'
' fraction of the training costs of the best models from the literature. We show that the Transformer'
' generalizes well to other tasks by applying it successfully to English constituency parsing both with'
            ' large and limited training data.' , candidate_labels=['machine learning', 'statistics', 'translation', 'vision'] , multi_label=True , )
        self.assertEqual(
            nested_simplify(outputs) , {
'sequence': (
'The dominant sequence transduction models are based on complex recurrent or convolutional neural'
' networks in an encoder-decoder configuration. The best performing models also connect the'
' encoder and decoder through an attention mechanism. We propose a new simple network'
' architecture, the Transformer, based solely on attention mechanisms, dispensing with recurrence'
' and convolutions entirely. Experiments on two machine translation tasks show these models to be'
' superior in quality while being more parallelizable and requiring significantly less time to'
' train. Our model achieves 28.4 BLEU on the WMT 2014 English-to-German translation task,'
' improving over the existing best results, including ensembles by over 2 BLEU. On the WMT 2014'
' English-to-French translation task, our model establishes a new single-model state-of-the-art'
' BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small fraction of the training'
' costs of the best models from the literature. We show that the Transformer generalizes well to'
' other tasks by applying it successfully to English constituency parsing both with large and'
' limited training data.'
),
'labels': ['translation', 'machine learning', 'vision', 'statistics'],
'scores': [0.8_17, 0.7_13, 0.0_18, 0.0_18],
} , )
| 361
|
import gc
import tempfile
import unittest
import numpy as np
import torch
from diffusers import VersatileDiffusionPipeline
from diffusers.utils.testing_utils import load_image, nightly, require_torch_gpu, torch_device
lowerCAmelCase = False
class VersatileDiffusionPipelineFastTests(unittest.TestCase):
    pass


@nightly
@require_torch_gpu
class VersatileDiffusionPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_from_save_pretrained(self):
        pipe = VersatileDiffusionPipeline.from_pretrained("shi-labs/versatile-diffusion", torch_dtype=torch.float16)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        prompt_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg"
        )
        generator = torch.manual_seed(0)
        image = pipe.dual_guided(
            prompt="first prompt", image=prompt_image, text_to_image_strength=0.75, generator=generator,
            guidance_scale=7.5, num_inference_steps=2, output_type="numpy",
        ).images
        with tempfile.TemporaryDirectory() as tmpdirname:
            pipe.save_pretrained(tmpdirname)
            pipe = VersatileDiffusionPipeline.from_pretrained(tmpdirname, torch_dtype=torch.float16)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        generator = generator.manual_seed(0)
        new_image = pipe.dual_guided(
            prompt="first prompt", image=prompt_image, text_to_image_strength=0.75, generator=generator,
            guidance_scale=7.5, num_inference_steps=2, output_type="numpy",
        ).images
        assert np.abs(image - new_image).sum() < 1e-5, "Models don't have the same forward pass"

    def test_inference_dual_guided_then_text_to_image(self):
        pipe = VersatileDiffusionPipeline.from_pretrained("shi-labs/versatile-diffusion", torch_dtype=torch.float16)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        prompt = "cyberpunk 2077"
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg"
        )
        generator = torch.manual_seed(0)
        image = pipe.dual_guided(
            prompt=prompt, image=init_image, text_to_image_strength=0.75, generator=generator,
            guidance_scale=7.5, num_inference_steps=50, output_type="numpy",
        ).images
        image_slice = image[0, 253:256, 253:256, -1]
        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.1448, 0.1619, 0.1741, 0.1086, 0.1147, 0.1128, 0.1199, 0.1165, 0.1001])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1
        prompt = "A painting of a squirrel eating a burger "
        generator = torch.manual_seed(0)
        image = pipe.text_to_image(
            prompt=prompt, generator=generator, guidance_scale=7.5, num_inference_steps=50, output_type="numpy"
        ).images
        image_slice = image[0, 253:256, 253:256, -1]
        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.3367, 0.3169, 0.2656, 0.3870, 0.4790, 0.3796, 0.4009, 0.4878, 0.4778])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1
        image = pipe.image_variation(init_image, generator=generator, output_type="numpy").images
        image_slice = image[0, 253:256, 253:256, -1]
        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.3076, 0.3123, 0.3284, 0.3782, 0.3770, 0.3894, 0.4297, 0.4331, 0.4456])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1
| 304
| 0
|
import os
from collections import deque
import torch
from torch.utils.data import Dataset
class CNNDMDataset(Dataset):
    """Abstracts the dataset used to train seq2seq models."""

    def __init__(self, path="", prefix="train"):
        assert os.path.isdir(path)
        self.documents = []
        story_filenames_list = os.listdir(path)
        for story_filename in story_filenames_list:
            if "summary" in story_filename:
                continue
            path_to_story = os.path.join(path, story_filename)
            if not os.path.isfile(path_to_story):
                continue
            self.documents.append(path_to_story)

    def __len__(self):
        return len(self.documents)

    def __getitem__(self, idx):
        document_path = self.documents[idx]
        document_name = document_path.split("/")[-1]
        with open(document_path, encoding="utf-8") as source:
            raw_story = source.read()
        story_lines, summary_lines = process_story(raw_story)
        return document_name, story_lines, summary_lines


def process_story(raw_story):
    nonempty_lines = list(filter(lambda x: len(x) != 0, [line.strip() for line in raw_story.split("\n")]))
    # for some unknown reason some lines miss a period, add it
    nonempty_lines = [_add_missing_period(line) for line in nonempty_lines]
    # gather article lines
    story_lines = []
    lines = deque(nonempty_lines)
    while True:
        try:
            element = lines.popleft()
            if element.startswith("@highlight"):
                break
            story_lines.append(element)
        except IndexError:
            # if "@highlight" is absent from the file we pop
            # all elements until there is None, raising an exception.
            return story_lines, []
    # gather summary lines
    summary_lines = list(filter(lambda t: not t.startswith("@highlight"), lines))
    return story_lines, summary_lines


def _add_missing_period(line):
    END_TOKENS = [".", "!", "?", "...", "'", "`", '"', "\u2019", "\u2019", ")"]
    if line.startswith("@highlight"):
        return line
    if line[-1] in END_TOKENS:
        return line
    return line + "."


def fit_to_block_size(sequence, block_size, pad_token_id):
    """Truncate or pad `sequence` so that it is exactly `block_size` tokens long."""
    if len(sequence) > block_size:
        return sequence[:block_size]
    else:
        sequence.extend([pad_token_id] * (block_size - len(sequence)))
        return sequence


def build_mask(sequence, pad_token_id):
    """Builds the attention mask: 1 for real tokens, 0 for padding."""
    mask = torch.ones_like(sequence)
    idx_pad_tokens = sequence == pad_token_id
    mask[idx_pad_tokens] = 0
    return mask


def encode_for_summarization(story_lines, summary_lines, tokenizer):
    story_lines_token_ids = [tokenizer.encode(line) for line in story_lines]
    story_token_ids = [token for sentence in story_lines_token_ids for token in sentence]
    summary_lines_token_ids = [tokenizer.encode(line) for line in summary_lines]
    summary_token_ids = [token for sentence in summary_lines_token_ids for token in sentence]
    return story_token_ids, summary_token_ids


def compute_token_type_ids(batch, separator_token_id):
    """Alternates 0/1 segment ids, flipping at each separator token."""
    batch_embeddings = []
    for sequence in batch:
        sentence_num = -1
        embeddings = []
        for s in sequence:
            if s == separator_token_id:
                sentence_num += 1
            embeddings.append(sentence_num % 2)
        batch_embeddings.append(embeddings)
    return torch.tensor(batch_embeddings)
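# Worked example for compute_token_type_ids (hypothetical token ids, separator id 101):
#   compute_token_type_ids([[101, 5, 6, 101, 7]], 101)  ->  tensor([[0, 0, 0, 1, 1]])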
| 362
|
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
    "configuration_xmod": [
        "XMOD_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "XmodConfig",
        "XmodOnnxConfig",
    ],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_xmod"] = [
'''XMOD_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''XmodForCausalLM''',
'''XmodForMaskedLM''',
'''XmodForMultipleChoice''',
'''XmodForQuestionAnswering''',
'''XmodForSequenceClassification''',
'''XmodForTokenClassification''',
'''XmodModel''',
'''XmodPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_xmod import XMOD_PRETRAINED_CONFIG_ARCHIVE_MAP, XmodConfig, XmodOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xmod import (
XMOD_PRETRAINED_MODEL_ARCHIVE_LIST,
XmodForCausalLM,
XmodForMaskedLM,
XmodForMultipleChoice,
XmodForQuestionAnswering,
XmodForSequenceClassification,
XmodForTokenClassification,
XmodModel,
XmodPreTrainedModel,
)
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
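# With the lazy module installed in sys.modules, `from transformers.models.xmod import XmodModel`
# only triggers the heavy torch-backed import on first attribute access, not at package import time.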
| 304
| 0
|
"""simple docstring"""
from typing import List, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
TIME_SERIES_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'''huggingface/time-series-transformer-tourism-monthly''': (
'''https://huggingface.co/huggingface/time-series-transformer-tourism-monthly/resolve/main/config.json'''
),
# See all TimeSeriesTransformer models at https://huggingface.co/models?filter=time_series_transformer
}
class TimeSeriesTransformerConfig(PretrainedConfig):
    model_type = "time_series_transformer"
    attribute_map = {
        "hidden_size": "d_model",
        "num_attention_heads": "encoder_attention_heads",
        "num_hidden_layers": "encoder_layers",
    }

    def __init__(
        self,
        prediction_length: Optional[int] = None,
        context_length: Optional[int] = None,
        distribution_output: str = "student_t",
        loss: str = "nll",
        input_size: int = 1,
        lags_sequence: List[int] = [1, 2, 3, 4, 5, 6, 7],
        scaling: Optional[Union[str, bool]] = "mean",
        num_dynamic_real_features: int = 0,
        num_static_categorical_features: int = 0,
        num_static_real_features: int = 0,
        num_time_features: int = 0,
        cardinality: Optional[List[int]] = None,
        embedding_dimension: Optional[List[int]] = None,
        encoder_ffn_dim: int = 32,
        decoder_ffn_dim: int = 32,
        encoder_attention_heads: int = 2,
        decoder_attention_heads: int = 2,
        encoder_layers: int = 2,
        decoder_layers: int = 2,
        is_encoder_decoder: bool = True,
        activation_function: str = "gelu",
        d_model: int = 64,
        dropout: float = 0.1,
        encoder_layerdrop: float = 0.1,
        decoder_layerdrop: float = 0.1,
        attention_dropout: float = 0.1,
        activation_dropout: float = 0.1,
        num_parallel_samples: int = 100,
        init_std: float = 0.02,
        use_cache=True,
        **kwargs,
    ):
        self.prediction_length = prediction_length
        self.context_length = context_length or prediction_length
        self.distribution_output = distribution_output
        self.loss = loss
        self.input_size = input_size
        self.num_time_features = num_time_features
        self.lags_sequence = lags_sequence
        self.scaling = scaling
        self.num_dynamic_real_features = num_dynamic_real_features
        self.num_static_real_features = num_static_real_features
        self.num_static_categorical_features = num_static_categorical_features
        if cardinality and num_static_categorical_features > 0:
            if len(cardinality) != num_static_categorical_features:
                raise ValueError(
                    "The cardinality should be a list of the same length as `num_static_categorical_features`"
                )
            self.cardinality = cardinality
        else:
            self.cardinality = [0]
        if embedding_dimension and num_static_categorical_features > 0:
            if len(embedding_dimension) != num_static_categorical_features:
                raise ValueError(
                    "The embedding dimension should be a list of the same length as `num_static_categorical_features`"
                )
            self.embedding_dimension = embedding_dimension
        else:
            self.embedding_dimension = [min(50, (cat + 1) // 2) for cat in self.cardinality]
        self.num_parallel_samples = num_parallel_samples
        # Transformer architecture configuration
        self.feature_size = input_size * len(lags_sequence) + self._number_of_features
        self.d_model = d_model
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_attention_heads = decoder_attention_heads
        self.encoder_ffn_dim = encoder_ffn_dim
        self.decoder_ffn_dim = decoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.decoder_layers = decoder_layers
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.activation_function = activation_function
        self.init_std = init_std
        self.use_cache = use_cache
        super().__init__(is_encoder_decoder=is_encoder_decoder, **kwargs)

    @property
    def _number_of_features(self) -> int:
        return (
            sum(self.embedding_dimension)
            + self.num_dynamic_real_features
            + self.num_time_features
            + self.num_static_real_features
            + self.input_size * 2  # the log1p(abs(loc)) and log(scale) features
        )
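# Minimal instantiation sketch (hypothetical values):
#   config = TimeSeriesTransformerConfig(prediction_length=24, context_length=48, num_time_features=2)
#   config.feature_size  # input_size * len(lags_sequence) + _number_of_features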
| 363
|
import math
from datetime import datetime, timedelta
def gauss_easter(year: int) -> datetime:
    """Calculate the Gregorian Easter date for a given year using Gauss's computus."""
    metonic_cycle = year % 19
    julian_leap_year = year % 4
    non_leap_year = year % 7
    leap_day_inhibits = math.floor(year / 100)
    lunar_orbit_correction = math.floor((13 + 8 * leap_day_inhibits) / 25)
    leap_day_reinstall_number = leap_day_inhibits / 4
    secular_moon_shift = (
        15 - lunar_orbit_correction + leap_day_inhibits - leap_day_reinstall_number
    ) % 30
    century_starting_point = (4 + leap_day_inhibits - leap_day_reinstall_number) % 7
    # days to be added to March 21
    days_to_add = (19 * metonic_cycle + secular_moon_shift) % 30
    # PHM -> Paschal Full Moon
    days_from_phm_to_sunday = (
        2 * julian_leap_year
        + 4 * non_leap_year
        + 6 * days_to_add
        + century_starting_point
    ) % 7
    if days_to_add == 29 and days_from_phm_to_sunday == 6:
        return datetime(year, 4, 19)
    elif days_to_add == 28 and days_from_phm_to_sunday == 6:
        return datetime(year, 4, 18)
    else:
        return datetime(year, 3, 22) + timedelta(
            days=int(days_to_add + days_from_phm_to_sunday)
        )


if __name__ == "__main__":
    for year in (1994, 2000, 2010, 2021, 2023):
        tense = "will be" if year > datetime.now().year else "was"
        print(f"Easter in {year} {tense} {gauss_easter(year)}")
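# Example values produced by the computus above: gauss_easter(2000) -> 2000-04-23
# and gauss_easter(2023) -> 2023-04-09 (both match the historical Easter dates).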
| 304
| 0
|
import unittest
from .lib import (
Matrix,
Vector,
axpy,
square_zero_matrix,
unit_basis_vector,
zero_vector,
)
class Test(unittest.TestCase):
    def test_component(self):
        x = Vector([1, 2, 3])
        self.assertEqual(x.component(0), 1)
        self.assertEqual(x.component(2), 3)
        _ = Vector()

    def test_str(self):
        x = Vector([0, 0, 0, 0, 0, 1])
        self.assertEqual(str(x), "(0,0,0,0,0,1)")

    def test_size(self):
        x = Vector([1, 2, 3, 4])
        self.assertEqual(len(x), 4)

    def test_euclidean_length(self):
        x = Vector([1, 2])
        y = Vector([1, 2, 3, 4, 5])
        z = Vector([0, 0, 0, 0, 0, 0, 0, 0, 0, 0])
        w = Vector([1, -1, 1, -1, 2, -3, 4, -5])
        self.assertAlmostEqual(x.euclidean_length(), 2.236, 3)
        self.assertAlmostEqual(y.euclidean_length(), 7.416, 3)
        self.assertEqual(z.euclidean_length(), 0)
        self.assertAlmostEqual(w.euclidean_length(), 7.616, 3)

    def test_add(self):
        x = Vector([1, 2, 3])
        y = Vector([1, 1, 1])
        self.assertEqual((x + y).component(0), 2)
        self.assertEqual((x + y).component(1), 3)
        self.assertEqual((x + y).component(2), 4)

    def test_sub(self):
        x = Vector([1, 2, 3])
        y = Vector([1, 1, 1])
        self.assertEqual((x - y).component(0), 0)
        self.assertEqual((x - y).component(1), 1)
        self.assertEqual((x - y).component(2), 2)

    def test_mul(self):
        x = Vector([1, 2, 3])
        a = Vector([2, -1, 4])  # for test of dot product
        b = Vector([1, -2, -1])
        self.assertEqual(str(x * 3.0), "(3.0,6.0,9.0)")
        self.assertEqual((a * b), 0)

    def test_zero_vector(self):
        self.assertEqual(str(zero_vector(10)).count("0"), 10)

    def test_unit_basis_vector(self):
        self.assertEqual(str(unit_basis_vector(3, 1)), "(0,1,0)")

    def test_axpy(self):
        x = Vector([1, 2, 3])
        y = Vector([1, 0, 1])
        self.assertEqual(str(axpy(2, x, y)), "(3,4,7)")

    def test_copy(self):
        x = Vector([1, 0, 0, 0, 0, 0])
        y = x.copy()
        self.assertEqual(str(x), str(y))

    def test_change_component(self):
        x = Vector([1, 0, 0])
        x.change_component(0, 0)
        x.change_component(1, 1)
        self.assertEqual(str(x), "(0,1,0)")

    def test_str_matrix(self):
        a = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]], 3, 3)
        self.assertEqual("|1,2,3|\n|2,4,5|\n|6,7,8|\n", str(a))

    def test_minor(self):
        a = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]], 3, 3)
        minors = [[-3, -14, -10], [-5, -10, -5], [-2, -1, 0]]
        for x in range(a.height()):
            for y in range(a.width()):
                self.assertEqual(minors[x][y], a.minor(x, y))

    def test_cofactor(self):
        a = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]], 3, 3)
        cofactors = [[-3, 14, -10], [5, -10, 5], [-2, 1, 0]]
        for x in range(a.height()):
            for y in range(a.width()):
                self.assertEqual(cofactors[x][y], a.cofactor(x, y))

    def test_determinant(self):
        a = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]], 3, 3)
        self.assertEqual(-5, a.determinant())

    def test__mul__matrix(self):
        a = Matrix([[1, 2, 3], [4, 5, 6], [7, 8, 9]], 3, 3)
        x = Vector([1, 2, 3])
        self.assertEqual("(14,32,50)", str(a * x))
        self.assertEqual("|2,4,6|\n|8,10,12|\n|14,16,18|\n", str(a * 2))

    def test_change_component_matrix(self):
        a = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]], 3, 3)
        a.change_component(0, 2, 5)
        self.assertEqual("|1,2,5|\n|2,4,5|\n|6,7,8|\n", str(a))

    def test_component_matrix(self):
        a = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]], 3, 3)
        self.assertEqual(7, a.component(2, 1), 0.01)

    def test__add__matrix(self):
        a = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]], 3, 3)
        b = Matrix([[1, 2, 7], [2, 4, 5], [6, 7, 10]], 3, 3)
        self.assertEqual("|2,4,10|\n|4,8,10|\n|12,14,18|\n", str(a + b))

    def test__sub__matrix(self):
        a = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]], 3, 3)
        b = Matrix([[1, 2, 7], [2, 4, 5], [6, 7, 10]], 3, 3)
        self.assertEqual("|0,0,-4|\n|0,0,0|\n|0,0,-2|\n", str(a - b))

    def test_square_zero_matrix(self):
        self.assertEqual(
            "|0,0,0,0,0|\n|0,0,0,0,0|\n|0,0,0,0,0|\n|0,0,0,0,0|\n|0,0,0,0,0|\n", str(square_zero_matrix(5))
        )
if __name__ == "__main__":
unittest.main()
| 364
|
from collections import OrderedDict
from typing import Any, Mapping, Optional
from ... import PreTrainedTokenizer
from ...configuration_utils import PretrainedConfig
from ...file_utils import TensorType, is_torch_available
from ...onnx import OnnxConfig, OnnxConfigWithPast, OnnxSeq2SeqConfigWithPast
from ...onnx.utils import compute_effective_axis_dimension
from ...utils import logging
logger = logging.get_logger(__name__)
BLENDERBOT_SMALL_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/blenderbot_small-90M": "https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/config.json",
    # See all BlenderbotSmall models at https://huggingface.co/models?filter=blenderbot_small
}
class BlenderbotSmallConfig(PretrainedConfig):
    model_type = "blenderbot-small"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}

    def __init__(
        self,
        vocab_size=50265,
        max_position_embeddings=512,
        encoder_layers=8,
        encoder_ffn_dim=2048,
        encoder_attention_heads=16,
        decoder_layers=8,
        decoder_ffn_dim=2048,
        decoder_attention_heads=16,
        encoder_layerdrop=0.0,
        decoder_layerdrop=0.0,
        use_cache=True,
        is_encoder_decoder=True,
        activation_function="gelu",
        d_model=512,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        decoder_start_token_id=1,
        scale_embedding=False,
        pad_token_id=0,
        bos_token_id=1,
        eos_token_id=2,
        forced_eos_token_id=2,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.num_hidden_layers = encoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            is_encoder_decoder=is_encoder_decoder,
            decoder_start_token_id=decoder_start_token_id,
            forced_eos_token_id=forced_eos_token_id,
            **kwargs,
        )
class BlenderbotSmallOnnxConfig(OnnxSeq2SeqConfigWithPast):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task in ["default", "seq2seq-lm"]:
            common_inputs = OrderedDict(
                [
                    ("input_ids", {0: "batch", 1: "encoder_sequence"}),
                    ("attention_mask", {0: "batch", 1: "encoder_sequence"}),
                ]
            )
            if self.use_past:
                common_inputs["decoder_input_ids"] = {0: "batch"}
                common_inputs["decoder_attention_mask"] = {0: "batch", 1: "past_decoder_sequence + sequence"}
            else:
                common_inputs["decoder_input_ids"] = {0: "batch", 1: "decoder_sequence"}
                common_inputs["decoder_attention_mask"] = {0: "batch", 1: "decoder_sequence"}
            if self.use_past:
                self.fill_with_past_key_values_(common_inputs, direction="inputs")
        elif self.task == "causal-lm":
            # TODO: figure this case out.
            common_inputs = OrderedDict(
                [
                    ("input_ids", {0: "batch", 1: "encoder_sequence"}),
                    ("attention_mask", {0: "batch", 1: "encoder_sequence"}),
                ]
            )
            if self.use_past:
                num_encoder_layers, _ = self.num_layers
                for i in range(num_encoder_layers):
                    common_inputs[f"past_key_values.{i}.key"] = {0: "batch", 2: "past_sequence + sequence"}
                    common_inputs[f"past_key_values.{i}.value"] = {0: "batch", 2: "past_sequence + sequence"}
        else:
            common_inputs = OrderedDict(
                [
                    ("input_ids", {0: "batch", 1: "encoder_sequence"}),
                    ("attention_mask", {0: "batch", 1: "encoder_sequence"}),
                    ("decoder_input_ids", {0: "batch", 1: "decoder_sequence"}),
                    ("decoder_attention_mask", {0: "batch", 1: "decoder_sequence"}),
                ]
            )
        return common_inputs

    @property
    def outputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task in ["default", "seq2seq-lm"]:
            common_outputs = super().outputs
        else:
            common_outputs = super(OnnxConfigWithPast, self).outputs
            if self.use_past:
                num_encoder_layers, _ = self.num_layers
                for i in range(num_encoder_layers):
                    common_outputs[f"present.{i}.key"] = {0: "batch", 2: "past_sequence + sequence"}
                    common_outputs[f"present.{i}.value"] = {0: "batch", 2: "past_sequence + sequence"}
        return common_outputs
    def _generate_dummy_inputs_for_default_and_seq2seq_lm(
        self,
        tokenizer: PreTrainedTokenizer,
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
    ) -> Mapping[str, Any]:
        encoder_inputs = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
            tokenizer, batch_size, seq_length, is_pair, framework
        )
        # Generate decoder inputs
        decoder_seq_length = seq_length if not self.use_past else 1
        decoder_inputs = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
            tokenizer, batch_size, decoder_seq_length, is_pair, framework
        )
        decoder_inputs = {f"decoder_{name}": tensor for name, tensor in decoder_inputs.items()}
        common_inputs = dict(**encoder_inputs, **decoder_inputs)
        if self.use_past:
            if not is_torch_available():
                raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed.")
            else:
                import torch
            batch, encoder_seq_length = common_inputs["input_ids"].shape
            decoder_seq_length = common_inputs["decoder_input_ids"].shape[1]
            num_encoder_attention_heads, num_decoder_attention_heads = self.num_attention_heads
            encoder_shape = (
                batch,
                num_encoder_attention_heads,
                encoder_seq_length,
                self._config.hidden_size // num_encoder_attention_heads,
            )
            decoder_past_length = decoder_seq_length + 3
            decoder_shape = (
                batch,
                num_decoder_attention_heads,
                decoder_past_length,
                self._config.hidden_size // num_decoder_attention_heads,
            )
            common_inputs["decoder_attention_mask"] = torch.cat(
                [common_inputs["decoder_attention_mask"], torch.ones(batch, decoder_past_length)], dim=1
            )
            common_inputs["past_key_values"] = []
            # If the number of encoder and decoder layers are present in the model configuration, both are considered
            num_encoder_layers, num_decoder_layers = self.num_layers
            min_num_layers = min(num_encoder_layers, num_decoder_layers)
            max_num_layers = max(num_encoder_layers, num_decoder_layers) - min_num_layers
            remaining_side_name = "encoder" if num_encoder_layers > num_decoder_layers else "decoder"
            for _ in range(min_num_layers):
                common_inputs["past_key_values"].append(
                    (
                        torch.zeros(decoder_shape),
                        torch.zeros(decoder_shape),
                        torch.zeros(encoder_shape),
                        torch.zeros(encoder_shape),
                    )
                )
            # TODO: test this.
            shape = encoder_shape if remaining_side_name == "encoder" else decoder_shape
            for _ in range(min_num_layers, max_num_layers):
                common_inputs["past_key_values"].append((torch.zeros(shape), torch.zeros(shape)))
        return common_inputs
    def _generate_dummy_inputs_for_causal_lm(
        self,
        tokenizer: PreTrainedTokenizer,
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
    ) -> Mapping[str, Any]:
        common_inputs = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
            tokenizer, batch_size, seq_length, is_pair, framework
        )
        if self.use_past:
            if not is_torch_available():
                raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed.")
            else:
                import torch
            batch, seqlen = common_inputs["input_ids"].shape
            # Not using the same length for past_key_values
            past_key_values_length = seqlen + 2
            num_encoder_layers, _ = self.num_layers
            num_encoder_attention_heads, _ = self.num_attention_heads
            past_shape = (
                batch,
                num_encoder_attention_heads,
                past_key_values_length,
                self._config.hidden_size // num_encoder_attention_heads,
            )
            mask_dtype = common_inputs["attention_mask"].dtype
            common_inputs["attention_mask"] = torch.cat(
                [common_inputs["attention_mask"], torch.ones(batch, past_key_values_length, dtype=mask_dtype)], dim=1
            )
            common_inputs["past_key_values"] = [
                (torch.zeros(past_shape), torch.zeros(past_shape)) for _ in range(num_encoder_layers)
            ]
        return common_inputs
    def _generate_dummy_inputs_for_sequence_classification_and_question_answering(
        self,
        tokenizer: PreTrainedTokenizer,
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
    ) -> Mapping[str, Any]:
        # Copied from OnnxConfig.generate_dummy_inputs
        # Did not use super(OnnxConfigWithPast, self).generate_dummy_inputs for code clarity.
        # If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
        batch_size = compute_effective_axis_dimension(
            batch_size, fixed_dimension=OnnxConfig.default_fixed_batch, num_token_to_add=0
        )
        # If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
        token_to_add = tokenizer.num_special_tokens_to_add(is_pair)
        seq_length = compute_effective_axis_dimension(
            seq_length, fixed_dimension=OnnxConfig.default_fixed_sequence, num_token_to_add=token_to_add
        )
        # Generate dummy inputs according to compute batch and sequence
        dummy_input = [" ".join([tokenizer.unk_token]) * seq_length] * batch_size
        common_inputs = dict(tokenizer(dummy_input, return_tensors=framework))
        return common_inputs
def _A (self , lowerCAmelCase , lowerCAmelCase = -1 , lowerCAmelCase = -1 , lowerCAmelCase = False , lowerCAmelCase = None , ):
if self.task in ["default", "seq2seq-lm"]:
__lowercase= self._generate_dummy_inputs_for_default_and_seqaseq_lm(
lowerCAmelCase , batch_size=lowerCAmelCase , seq_length=lowerCAmelCase , is_pair=lowerCAmelCase , framework=lowerCAmelCase )
elif self.task == "causal-lm":
__lowercase= self._generate_dummy_inputs_for_causal_lm(
lowerCAmelCase , batch_size=lowerCAmelCase , seq_length=lowerCAmelCase , is_pair=lowerCAmelCase , framework=lowerCAmelCase )
else:
__lowercase= self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
lowerCAmelCase , batch_size=lowerCAmelCase , seq_length=lowerCAmelCase , is_pair=lowerCAmelCase , framework=lowerCAmelCase )
return common_inputs
def _A (self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase ):
if self.task in ["default", "seq2seq-lm"]:
__lowercase= super()._flatten_past_key_values_(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase )
else:
__lowercase= super(lowerCAmelCase , self )._flatten_past_key_values_(
lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase )
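# --- Illustrative sketch (an addition, not part of the original file): how the
# past_key_values list above is assembled when encoder and decoder depths differ.
# All shapes and layer counts below are assumptions chosen for readability.
import torch

batch, num_heads, past_len, head_dim = 2, 4, 5, 8
encoder_shape = (batch, num_heads, past_len, head_dim)
decoder_shape = (batch, num_heads, past_len, head_dim)
num_encoder_layers, num_decoder_layers = 6, 1_2  # hypothetical depths

past_key_values = []
min_num_layers = min(num_encoder_layers, num_decoder_layers)
# Layers present on both sides cache 4 tensors: decoder self-attention key/value
# plus cross-attention key/value.
for _ in range(min_num_layers):
    past_key_values.append(
        (torch.zeros(decoder_shape), torch.zeros(decoder_shape),
         torch.zeros(encoder_shape), torch.zeros(encoder_shape)))
# The deeper side contributes only a (key, value) pair for each remaining layer.
remaining_shape = encoder_shape if num_encoder_layers > num_decoder_layers else decoder_shape
for _ in range(max(num_encoder_layers, num_decoder_layers) - min_num_layers):
    past_key_values.append((torch.zeros(remaining_shape), torch.zeros(remaining_shape)))
assert len(past_key_values) == max(num_encoder_layers, num_decoder_layers)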
| 304
| 0
|
from math import factorial
DIGIT_FACTORIAL = {str(d): factorial(d) for d in range(1_0)}
def sum_of_digit_factorial(number ) -> int:
    '''simple docstring'''
    return sum(DIGIT_FACTORIAL[d] for d in str(number ) )
def solution() -> int:
    '''simple docstring'''
    limit = 7 * factorial(9 ) + 1
    return sum(i for i in range(3 , limit ) if sum_of_digit_factorial(i ) == i )
if __name__ == "__main__":
    print(F'{solution() = }')
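    # Quick sanity checks (an addition, not part of the original solution): 145 and
    # 40585 are the known curious numbers, equal to the sum of their digit factorials.
    assert sum_of_digit_factorial(1_4_5) == 1_4_5
    assert sum_of_digit_factorial(4_0_5_8_5) == 4_0_5_8_5
    assert sum_of_digit_factorial(1_0) != 1_0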
| 365
|
from math import factorial, radians
def sin(angle_in_degrees , accuracy = 1_8 , rounded_values_count = 1_0 ) -> float:
    '''simple docstring'''
    # Simplify the angle to be between 360 and -360 degrees
    angle_in_degrees = angle_in_degrees - ((angle_in_degrees // 360.0) * 360.0)
    # Converting from degrees to radians
    angle_in_radians = radians(angle_in_degrees )
    result = angle_in_radians
    a = 3
    b = -1
    for _ in range(accuracy ):
        result += (b * (angle_in_radians**a)) / factorial(a )
        b = -b  # One positive term and the next will be negative and so on...
        a += 2  # Increased by 2 for every term.
    return round(result , rounded_values_count )
if __name__ == "__main__":
    __import__('''doctest''').testmod()
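    # Illustrative check (an addition): the truncated Maclaurin series above should
    # agree with math.sin for moderate angles; isclose guards float rounding.
    from math import isclose, sin as math_sin
    assert isclose(sin(30.0 ) , 0.5 , abs_tol=1E-9 )
    assert isclose(sin(45.0 ) , math_sin(radians(45.0 ) ) , abs_tol=1E-9 )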
| 304
| 0
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_torch_available
lowerCAmelCase = {"configuration_speech_encoder_decoder": ["SpeechEncoderDecoderConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase = ["SpeechEncoderDecoderModel"]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase = ["FlaxSpeechEncoderDecoderModel"]
if TYPE_CHECKING:
from .configuration_speech_encoder_decoder import SpeechEncoderDecoderConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_speech_encoder_decoder import SpeechEncoderDecoderModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_speech_encoder_decoder import FlaxSpeechEncoderDecoderModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
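# Minimal sketch (an addition) of the lazy-module idea used above: attribute access
# triggers the real import instead of importing everything eagerly. The class and
# names below are illustrative, not the real transformers _LazyModule API.
import importlib, types

class TinyLazyModule(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        self._import_structure = import_structure
    def __getattr__(self, attr):
        # Resolve the attribute by importing the owning module on first access.
        for module, names in self._import_structure.items():
            if attr in names:
                return getattr(importlib.import_module(module), attr)
        raise AttributeError(attr)

# e.g. TinyLazyModule('demo', {'math': ['sqrt']}).sqrt(4.0) imports math lazily -> 2.0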
| 366
|
lowerCAmelCase = [
9_9_9,
8_0_0,
7_9_9,
6_0_0,
5_9_9,
5_0_0,
4_0_0,
3_9_9,
3_7_7,
3_5_5,
3_3_3,
3_1_1,
2_8_8,
2_6_6,
2_4_4,
2_2_2,
2_0_0,
1_9_9,
1_7_7,
1_5_5,
1_3_3,
1_1_1,
8_8,
6_6,
4_4,
2_2,
0,
]
lowerCAmelCase = [
9_9_9,
9_7_6,
9_5_2,
9_2_8,
9_0_5,
8_8_2,
8_5_8,
8_5_7,
8_1_0,
7_6_2,
7_1_5,
7_1_4,
5_7_2,
4_2_9,
4_2_8,
2_8_6,
2_8_5,
2_3_8,
1_9_0,
1_4_3,
1_4_2,
1_1_8,
9_5,
7_1,
4_7,
2_4,
0,
]
lowerCAmelCase = [
9_9_9,
9_8_8,
9_7_7,
9_6_6,
9_5_5,
9_4_4,
9_3_3,
9_2_2,
9_1_1,
9_0_0,
8_9_9,
8_7_9,
8_5_9,
8_4_0,
8_2_0,
8_0_0,
7_9_9,
7_6_6,
7_3_3,
7_0_0,
6_9_9,
6_5_0,
6_0_0,
5_9_9,
5_0_0,
4_9_9,
4_0_0,
3_9_9,
3_5_0,
3_0_0,
2_9_9,
2_6_6,
2_3_3,
2_0_0,
1_9_9,
1_7_9,
1_5_9,
1_4_0,
1_2_0,
1_0_0,
9_9,
8_8,
7_7,
6_6,
5_5,
4_4,
3_3,
2_2,
1_1,
0,
]
lowerCAmelCase = [
9_9_9,
9_9_5,
9_9_2,
9_8_9,
9_8_5,
9_8_1,
9_7_8,
9_7_5,
9_7_1,
9_6_7,
9_6_4,
9_6_1,
9_5_7,
9_5_6,
9_5_1,
9_4_7,
9_4_2,
9_3_7,
9_3_3,
9_2_8,
9_2_3,
9_1_9,
9_1_4,
9_1_3,
9_0_8,
9_0_3,
8_9_7,
8_9_2,
8_8_7,
8_8_1,
8_7_6,
8_7_1,
8_7_0,
8_6_4,
8_5_8,
8_5_2,
8_4_6,
8_4_0,
8_3_4,
8_2_8,
8_2_7,
8_2_0,
8_1_3,
8_0_6,
7_9_9,
7_9_2,
7_8_5,
7_8_4,
7_7_7,
7_7_0,
7_6_3,
7_5_6,
7_4_9,
7_4_2,
7_4_1,
7_3_3,
7_2_4,
7_1_6,
7_0_7,
6_9_9,
6_9_8,
6_8_8,
6_7_7,
6_6_6,
6_5_6,
6_5_5,
6_4_5,
6_3_4,
6_2_3,
6_1_3,
6_1_2,
5_9_8,
5_8_4,
5_7_0,
5_6_9,
5_5_5,
5_4_1,
5_2_7,
5_2_6,
5_0_5,
4_8_4,
4_8_3,
4_6_2,
4_4_0,
4_3_9,
3_9_6,
3_9_5,
3_5_2,
3_5_1,
3_0_8,
3_0_7,
2_6_4,
2_6_3,
2_2_0,
2_1_9,
1_7_6,
1_3_2,
8_8,
4_4,
0,
]
lowerCAmelCase = [
9_9_9,
9_9_7,
9_9_5,
9_9_2,
9_9_0,
9_8_8,
9_8_6,
9_8_4,
9_8_1,
9_7_9,
9_7_7,
9_7_5,
9_7_2,
9_7_0,
9_6_8,
9_6_6,
9_6_4,
9_6_1,
9_5_9,
9_5_7,
9_5_6,
9_5_4,
9_5_1,
9_4_9,
9_4_6,
9_4_4,
9_4_1,
9_3_9,
9_3_6,
9_3_4,
9_3_1,
9_2_9,
9_2_6,
9_2_4,
9_2_1,
9_1_9,
9_1_6,
9_1_4,
9_1_3,
9_1_0,
9_0_7,
9_0_5,
9_0_2,
8_9_9,
8_9_6,
8_9_3,
8_9_1,
8_8_8,
8_8_5,
8_8_2,
8_7_9,
8_7_7,
8_7_4,
8_7_1,
8_7_0,
8_6_7,
8_6_4,
8_6_1,
8_5_8,
8_5_5,
8_5_2,
8_4_9,
8_4_6,
8_4_3,
8_4_0,
8_3_7,
8_3_4,
8_3_1,
8_2_8,
8_2_7,
8_2_4,
8_2_1,
8_1_7,
8_1_4,
8_1_1,
8_0_8,
8_0_4,
8_0_1,
7_9_8,
7_9_5,
7_9_1,
7_8_8,
7_8_5,
7_8_4,
7_8_0,
7_7_7,
7_7_4,
7_7_0,
7_6_6,
7_6_3,
7_6_0,
7_5_6,
7_5_2,
7_4_9,
7_4_6,
7_4_2,
7_4_1,
7_3_7,
7_3_3,
7_3_0,
7_2_6,
7_2_2,
7_1_8,
7_1_4,
7_1_0,
7_0_7,
7_0_3,
6_9_9,
6_9_8,
6_9_4,
6_9_0,
6_8_5,
6_8_1,
6_7_7,
6_7_3,
6_6_9,
6_6_4,
6_6_0,
6_5_6,
6_5_5,
6_5_0,
6_4_6,
6_4_1,
6_3_6,
6_3_2,
6_2_7,
6_2_2,
6_1_8,
6_1_3,
6_1_2,
6_0_7,
6_0_2,
5_9_6,
5_9_1,
5_8_6,
5_8_0,
5_7_5,
5_7_0,
5_6_9,
5_6_3,
5_5_7,
5_5_1,
5_4_5,
5_3_9,
5_3_3,
5_2_7,
5_2_6,
5_1_9,
5_1_2,
5_0_5,
4_9_8,
4_9_1,
4_8_4,
4_8_3,
4_7_4,
4_6_6,
4_5_7,
4_4_9,
4_4_0,
4_3_9,
4_2_8,
4_1_8,
4_0_7,
3_9_6,
3_9_5,
3_8_1,
3_6_6,
3_5_2,
3_5_1,
3_3_0,
3_0_8,
3_0_7,
2_8_6,
2_6_4,
2_6_3,
2_4_2,
2_2_0,
2_1_9,
1_7_6,
1_7_5,
1_3_2,
1_3_1,
8_8,
4_4,
0,
]
lowerCAmelCase = [
9_9_9,
9_9_1,
9_8_2,
9_7_4,
9_6_6,
9_5_8,
9_5_0,
9_4_1,
9_3_3,
9_2_5,
9_1_6,
9_0_8,
9_0_0,
8_9_9,
8_7_4,
8_5_0,
8_2_5,
8_0_0,
7_9_9,
7_0_0,
6_0_0,
5_0_0,
4_0_0,
3_0_0,
2_0_0,
1_0_0,
0,
]
lowerCAmelCase = [
9_9_9,
9_9_2,
9_8_5,
9_7_8,
9_7_1,
9_6_4,
9_5_7,
9_4_9,
9_4_2,
9_3_5,
9_2_8,
9_2_1,
9_1_4,
9_0_7,
9_0_0,
8_9_9,
8_7_9,
8_5_9,
8_4_0,
8_2_0,
8_0_0,
7_9_9,
7_6_6,
7_3_3,
7_0_0,
6_9_9,
6_5_0,
6_0_0,
5_9_9,
5_0_0,
4_9_9,
4_0_0,
3_9_9,
3_0_0,
2_9_9,
2_0_0,
1_9_9,
1_0_0,
9_9,
0,
]
lowerCAmelCase = [
9_9_9,
9_9_6,
9_9_2,
9_8_9,
9_8_5,
9_8_2,
9_7_9,
9_7_5,
9_7_2,
9_6_8,
9_6_5,
9_6_1,
9_5_8,
9_5_5,
9_5_1,
9_4_8,
9_4_4,
9_4_1,
9_3_8,
9_3_4,
9_3_1,
9_2_7,
9_2_4,
9_2_0,
9_1_7,
9_1_4,
9_1_0,
9_0_7,
9_0_3,
9_0_0,
8_9_9,
8_9_1,
8_8_4,
8_7_6,
8_6_9,
8_6_1,
8_5_3,
8_4_6,
8_3_8,
8_3_0,
8_2_3,
8_1_5,
8_0_8,
8_0_0,
7_9_9,
7_8_8,
7_7_7,
7_6_6,
7_5_5,
7_4_4,
7_3_3,
7_2_2,
7_1_1,
7_0_0,
6_9_9,
6_8_8,
6_7_7,
6_6_6,
6_5_5,
6_4_4,
6_3_3,
6_2_2,
6_1_1,
6_0_0,
5_9_9,
5_8_5,
5_7_1,
5_5_7,
5_4_2,
5_2_8,
5_1_4,
5_0_0,
4_9_9,
4_8_5,
4_7_1,
4_5_7,
4_4_2,
4_2_8,
4_1_4,
4_0_0,
3_9_9,
3_7_9,
3_5_9,
3_4_0,
3_2_0,
3_0_0,
2_9_9,
2_7_9,
2_5_9,
2_4_0,
2_2_0,
2_0_0,
1_9_9,
1_6_6,
1_3_3,
1_0_0,
9_9,
6_6,
3_3,
0,
]
| 304
| 0
|
"""simple docstring"""
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
StableDiffusionSAGPipeline,
UNetaDConditionModel,
)
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class A ( PipelineLatentTesterMixin , PipelineTesterMixin , unittest.TestCase ):
UpperCamelCase_ : Tuple =StableDiffusionSAGPipeline
UpperCamelCase_ : Tuple =TEXT_TO_IMAGE_PARAMS
UpperCamelCase_ : Tuple =TEXT_TO_IMAGE_BATCH_PARAMS
UpperCamelCase_ : List[Any] =TEXT_TO_IMAGE_IMAGE_PARAMS
UpperCamelCase_ : Optional[Any] =TEXT_TO_IMAGE_IMAGE_PARAMS
UpperCamelCase_ : List[str] =False
def _A (self ):
torch.manual_seed(0 )
__lowercase= UNetaDConditionModel(
block_out_channels=(3_2, 6_4) , layers_per_block=2 , sample_size=3_2 , in_channels=4 , out_channels=4 , down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D') , up_block_types=('CrossAttnUpBlock2D', 'UpBlock2D') , cross_attention_dim=3_2 , )
__lowercase= DDIMScheduler(
beta_start=0.0_00_85 , beta_end=0.0_12 , beta_schedule='scaled_linear' , clip_sample=__lowercase , set_alpha_to_one=__lowercase , )
torch.manual_seed(0 )
__lowercase= AutoencoderKL(
block_out_channels=[3_2, 6_4] , in_channels=3 , out_channels=3 , down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'] , up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'] , latent_channels=4 , )
torch.manual_seed(0 )
__lowercase= CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=3_2 , intermediate_size=3_7 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_0_0_0 , )
__lowercase= CLIPTextModel(__lowercase )
__lowercase= CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' )
__lowercase= {
'''unet''': unet,
'''scheduler''': scheduler,
'''vae''': vae,
'''text_encoder''': text_encoder,
'''tokenizer''': tokenizer,
'''safety_checker''': None,
'''feature_extractor''': None,
}
return components
def _A (self , lowerCAmelCase , lowerCAmelCase=0 ):
if str(__lowercase ).startswith('mps' ):
__lowercase= torch.manual_seed(__lowercase )
else:
__lowercase= torch.Generator(device=__lowercase ).manual_seed(__lowercase )
__lowercase= {
'''prompt''': '''.''',
'''generator''': generator,
'''num_inference_steps''': 2,
'''guidance_scale''': 1.0,
'''sag_scale''': 1.0,
'''output_type''': '''numpy''',
}
return inputs
def _A (self ):
super().test_inference_batch_single_identical(expected_max_diff=3E-3 )
@slow
@require_torch_gpu
class A ( unittest.TestCase ):
def _A (self ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def _A (self ):
__lowercase= StableDiffusionSAGPipeline.from_pretrained('CompVis/stable-diffusion-v1-4' )
__lowercase= sag_pipe.to(__lowercase )
sag_pipe.set_progress_bar_config(disable=__lowercase )
__lowercase= '''.'''
__lowercase= torch.manual_seed(0 )
__lowercase= sag_pipe(
[prompt] , generator=__lowercase , guidance_scale=7.5 , sag_scale=1.0 , num_inference_steps=2_0 , output_type='np' )
__lowercase= output.images
__lowercase= image[0, -3:, -3:, -1]
assert image.shape == (1, 5_1_2, 5_1_2, 3)
__lowercase= np.array([0.15_68, 0.17_38, 0.16_95, 0.16_93, 0.15_07, 0.17_05, 0.15_47, 0.17_51, 0.19_49] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 5E-2
def _A (self ):
__lowercase= StableDiffusionSAGPipeline.from_pretrained('stabilityai/stable-diffusion-2-1-base' )
__lowercase= sag_pipe.to(__lowercase )
sag_pipe.set_progress_bar_config(disable=__lowercase )
__lowercase= '''.'''
__lowercase= torch.manual_seed(0 )
__lowercase= sag_pipe(
[prompt] , generator=__lowercase , guidance_scale=7.5 , sag_scale=1.0 , num_inference_steps=2_0 , output_type='np' )
__lowercase= output.images
__lowercase= image[0, -3:, -3:, -1]
assert image.shape == (1, 5_1_2, 5_1_2, 3)
__lowercase= np.array([0.34_59, 0.28_76, 0.25_37, 0.30_02, 0.26_71, 0.21_60, 0.30_26, 0.22_62, 0.23_71] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 5E-2
def _A (self ):
__lowercase= StableDiffusionSAGPipeline.from_pretrained('stabilityai/stable-diffusion-2-1-base' )
__lowercase= sag_pipe.to(__lowercase )
sag_pipe.set_progress_bar_config(disable=__lowercase )
__lowercase= '''.'''
__lowercase= torch.manual_seed(0 )
__lowercase= sag_pipe(
[prompt] , width=7_6_8 , height=5_1_2 , generator=__lowercase , guidance_scale=7.5 , sag_scale=1.0 , num_inference_steps=2_0 , output_type='np' , )
__lowercase= output.images
assert image.shape == (1, 5_1_2, 7_6_8, 3)
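# Note (an addition): `sag_scale` sets the strength of self-attention guidance on
# top of classifier-free guidance; with sag_scale=0.0 the call should reduce to
# plain Stable Diffusion behaviour, e.g.
#
#   output = sag_pipe([prompt] , generator=generator , guidance_scale=7.5 , sag_scale=0.0 , num_inference_steps=2_0 , output_type='np' )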
| 367
|
from __future__ import annotations
import numpy as np
def relu(vector ) -> np.ndarray:
    '''simple docstring'''
    return np.maximum(0 , vector )
if __name__ == "__main__":
print(np.array(relu([-1, 0, 5]))) # --> [0, 0, 5]
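    # Illustrative extra (an addition): relu is elementwise, so it handles any
    # array shape unchanged.
    print(np.array(relu([[-2, 3], [5, -1]])))  # --> [[0 3] [5 0]]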
| 304
| 0
|
import argparse
import json
import os
import torch
from transformers import LukeConfig, LukeModel, LukeTokenizer, RobertaTokenizer
from transformers.tokenization_utils_base import AddedToken
@torch.no_grad()
def convert_luke_checkpoint(checkpoint_path , metadata_path , entity_vocab_path , pytorch_dump_folder_path , model_size ):
    '''simple docstring'''
    with open(metadata_path ) as metadata_file:
        metadata = json.load(metadata_file )
    config = LukeConfig(use_entity_aware_attention=True , **metadata['model_config'] )
    # Load in the weights from the checkpoint_path
    state_dict = torch.load(checkpoint_path , map_location='cpu' )
    # Load the entity vocab file
    entity_vocab = load_entity_vocab(entity_vocab_path )
    tokenizer = RobertaTokenizer.from_pretrained(metadata['model_config']['bert_model_name'] )
    # Add special tokens to the token vocabulary for downstream tasks
    entity_token_one = AddedToken('<ent>' , lstrip=False , rstrip=False )
    entity_token_two = AddedToken('<ent2>' , lstrip=False , rstrip=False )
    tokenizer.add_special_tokens({'additional_special_tokens': [entity_token_one, entity_token_two]} )
    config.vocab_size += 2
    print(F'Saving tokenizer to {pytorch_dump_folder_path}' )
    tokenizer.save_pretrained(pytorch_dump_folder_path )
    with open(os.path.join(pytorch_dump_folder_path , LukeTokenizer.vocab_files_names['entity_vocab_file'] ) , 'w' ) as f:
        json.dump(entity_vocab , f )
    tokenizer = LukeTokenizer.from_pretrained(pytorch_dump_folder_path )
    # Initialize the embeddings of the special tokens
    word_emb = state_dict['embeddings.word_embeddings.weight']
    ent_emb = word_emb[tokenizer.convert_tokens_to_ids(['@'] )[0]].unsqueeze(0 )
    ent_two_emb = word_emb[tokenizer.convert_tokens_to_ids(['#'] )[0]].unsqueeze(0 )
    state_dict['embeddings.word_embeddings.weight'] = torch.cat([word_emb, ent_emb, ent_two_emb] )
    # Initialize the query layers of the entity-aware self-attention mechanism
    for layer_index in range(config.num_hidden_layers ):
        for matrix_name in ["query.weight", "query.bias"]:
            prefix = F'encoder.layer.{layer_index}.attention.self.'
            state_dict[prefix + 'w2e_' + matrix_name] = state_dict[prefix + matrix_name]
            state_dict[prefix + 'e2w_' + matrix_name] = state_dict[prefix + matrix_name]
            state_dict[prefix + 'e2e_' + matrix_name] = state_dict[prefix + matrix_name]
    # Initialize the embedding of the [MASK2] entity using that of the [MASK] entity for downstream tasks
    entity_emb = state_dict['entity_embeddings.entity_embeddings.weight']
    entity_emb[entity_vocab['[MASK2]']] = entity_emb[entity_vocab['[MASK]']]
    model = LukeModel(config=config ).eval()
    missing_keys, unexpected_keys = model.load_state_dict(state_dict , strict=False )
    if not (len(missing_keys ) == 1 and missing_keys[0] == "embeddings.position_ids"):
        raise ValueError(F'Missing keys {", ".join(missing_keys )}. Expected only missing embeddings.position_ids' )
    if not (all(key.startswith('entity_predictions' ) or key.startswith('lm_head' ) for key in unexpected_keys )):
        raise ValueError(
            'Unexpected keys'
            F' {", ".join([key for key in unexpected_keys if not (key.startswith("entity_predictions" ) or key.startswith("lm_head" ))] )}' )
    # Check outputs
    tokenizer = LukeTokenizer.from_pretrained(pytorch_dump_folder_path , task='entity_classification' )
    text = (
        'Top seed Ana Ivanovic said on Thursday she could hardly believe her luck as a fortuitous netcord helped the'
        ' new world number one avoid a humiliating second- round exit at Wimbledon .'
    )
    span = (3_9, 4_2)
    encoding = tokenizer(text , entity_spans=[span] , add_prefix_space=True , return_tensors='pt' )
    outputs = model(**encoding )
    # Verify word hidden states
    if model_size == "large":
        expected_shape = torch.Size((1, 4_2, 1_0_2_4) )
        expected_slice = torch.tensor(
            [[0.0133, 0.0865, 0.0095], [0.3093, -0.2576, -0.7418], [-0.1720, -0.2117, -0.2869]] )
    else:  # base
        expected_shape = torch.Size((1, 4_2, 7_6_8) )
        expected_slice = torch.tensor([[0.0037, 0.1368, -0.0091], [0.1099, 0.3329, -0.1095], [0.0765, 0.5335, 0.1179]] )
    if not (outputs.last_hidden_state.shape == expected_shape):
        raise ValueError(
            F'Outputs.last_hidden_state.shape is {outputs.last_hidden_state.shape}, Expected shape is {expected_shape}' )
    if not torch.allclose(outputs.last_hidden_state[0, :3, :3] , expected_slice , atol=1E-4 ):
        raise ValueError
    # Verify entity hidden states
    if model_size == "large":
        expected_shape = torch.Size((1, 1, 1_0_2_4) )
        expected_slice = torch.tensor([[0.0466, -0.0106, -0.0179]] )
    else:  # base
        expected_shape = torch.Size((1, 1, 7_6_8) )
        expected_slice = torch.tensor([[0.1457, 0.1044, 0.0174]] )
    if not (outputs.entity_last_hidden_state.shape == expected_shape):
        raise ValueError(
            F'Outputs.entity_last_hidden_state.shape is {outputs.entity_last_hidden_state.shape}, Expected shape is'
            F' {expected_shape}' )
    if not torch.allclose(outputs.entity_last_hidden_state[0, :3, :3] , expected_slice , atol=1E-4 ):
        raise ValueError
    # Finally, save our PyTorch model and tokenizer
    print('Saving PyTorch model to {}'.format(pytorch_dump_folder_path ) )
    model.save_pretrained(pytorch_dump_folder_path )
def load_entity_vocab(entity_vocab_path ):
    '''simple docstring'''
    entity_vocab = {}
    with open(entity_vocab_path , 'r' , encoding='utf-8' ) as f:
        for index, line in enumerate(f ):
            title, _ = line.rstrip().split('\t' )
            entity_vocab[title] = index
    return entity_vocab
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument('''--checkpoint_path''', type=str, help='''Path to a pytorch_model.bin file.''')
parser.add_argument(
'''--metadata_path''', default=None, type=str, help='''Path to a metadata.json file, defining the configuration.'''
)
parser.add_argument(
'''--entity_vocab_path''',
default=None,
type=str,
help='''Path to an entity_vocab.tsv file, containing the entity vocabulary.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to where to dump the output PyTorch model.'''
)
parser.add_argument(
'''--model_size''', default='''base''', type=str, choices=['''base''', '''large'''], help='''Size of the model to be converted.'''
)
    args = parser.parse_args()
convert_luke_checkpoint(
args.checkpoint_path,
args.metadata_path,
args.entity_vocab_path,
args.pytorch_dump_folder_path,
args.model_size,
)
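# Example invocation (an addition; the script name and every path are placeholders):
#
#   python convert_luke_original_pytorch_checkpoint_to_pytorch.py \
#       --checkpoint_path ./luke.bin \
#       --metadata_path ./metadata.json \
#       --entity_vocab_path ./entity_vocab.tsv \
#       --pytorch_dump_folder_path ./converted-luke \
#       --model_size base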
| 368
|
def solution(power = 1_0_0_0 ) -> int:
    '''simple docstring'''
    num = 2**power
    string_num = str(num )
    list_num = list(string_num )
    sum_of_num = 0
    for i in list_num:
        sum_of_num += int(i )
    return sum_of_num
if __name__ == "__main__":
    power = int(input('''Enter the power of 2: ''').strip())
    print('''2 ^ ''', power, ''' = ''', 2**power)
    result = solution(power)
    print('''Sum of the digits is: ''', result)
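    # Quick check (an addition): 2**15 = 32768 and 3 + 2 + 7 + 6 + 8 = 26, the
    # classic Project Euler 16 example.
    assert solution(1_5) == 2_6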
| 304
| 0
|
import argparse
import pytorch_lightning as pl
import torch
from torch import nn
from transformers import LongformerForQuestionAnswering, LongformerModel
class LightningModel( pl.LightningModule ):
    def __init__(self , model ):
        super().__init__()
        self.model = model
        self.num_labels = 2
        self.qa_outputs = nn.Linear(self.model.config.hidden_size , self.num_labels )
    def forward(self ):
        pass
def convert_longformer_qa_checkpoint_to_pytorch(longformer_model , longformer_question_answering_ckpt_path , pytorch_dump_folder_path ):
    '''simple docstring'''
    model = LongformerModel.from_pretrained(longformer_model )
    lightning_model = LightningModel(model )
    ckpt = torch.load(longformer_question_answering_ckpt_path , map_location=torch.device('cpu' ) )
    lightning_model.load_state_dict(ckpt['state_dict'] )
    # init longformer question answering model
    longformer_for_qa = LongformerForQuestionAnswering.from_pretrained(longformer_model )
    # transfer weights
    longformer_for_qa.longformer.load_state_dict(lightning_model.model.state_dict() )
    longformer_for_qa.qa_outputs.load_state_dict(lightning_model.qa_outputs.state_dict() )
    longformer_for_qa.eval()
    # save model
    longformer_for_qa.save_pretrained(pytorch_dump_folder_path )
    print(F'Conversion successful. Model saved under {pytorch_dump_folder_path}' )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--longformer_model''',
default=None,
type=str,
required=True,
help='''model identifier of longformer. Should be either `longformer-base-4096` or `longformer-large-4096`.''',
)
parser.add_argument(
'''--longformer_question_answering_ckpt_path''',
default=None,
type=str,
required=True,
help='''Path the official PyTorch Lightning Checkpoint.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
    args = parser.parse_args()
convert_longformer_qa_checkpoint_to_pytorch(
args.longformer_model, args.longformer_question_answering_ckpt_path, args.pytorch_dump_folder_path
)
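# Example invocation (an addition; the script name and paths are placeholders):
#
#   python convert_longformer_original_pytorch_lightning_to_pytorch.py \
#       --longformer_model longformer-base-4096 \
#       --longformer_question_answering_ckpt_path ./checkpoint.ckpt \
#       --pytorch_dump_folder_path ./longformer-qa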
| 369
|
import logging
import os
from dataclasses import dataclass, field
from typing import Dict, Optional
import datasets
import numpy as np
import tensorflow as tf
from transformers import (
AutoConfig,
AutoTokenizer,
EvalPrediction,
HfArgumentParser,
PreTrainedTokenizer,
TFAutoModelForSequenceClassification,
TFTrainer,
TFTrainingArguments,
)
from transformers.utils import logging as hf_logging
hf_logging.set_verbosity_info()
hf_logging.enable_default_handler()
hf_logging.enable_explicit_format()
def _lowerCamelCase( lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ = None , ) -> int:
'''simple docstring'''
__lowercase= {}
if train_file is not None:
__lowercase= [train_file]
if eval_file is not None:
__lowercase= [eval_file]
if test_file is not None:
__lowercase= [test_file]
__lowercase= datasets.load_dataset('csv' , data_files=lowercase__ )
__lowercase= list(ds[list(files.keys() )[0]].features.keys() )
__lowercase= features_name.pop(lowercase__ )
__lowercase= list(set(ds[list(files.keys() )[0]][label_name] ) )
__lowercase= {label: i for i, label in enumerate(lowercase__ )}
__lowercase= tokenizer.model_input_names
__lowercase= {}
if len(lowercase__ ) == 1:
for k in files.keys():
__lowercase= ds[k].map(
lambda lowercase__ : tokenizer.batch_encode_plus(
example[features_name[0]] , truncation=lowercase__ , max_length=lowercase__ , padding='max_length' ) , batched=lowercase__ , )
elif len(lowercase__ ) == 2:
for k in files.keys():
__lowercase= ds[k].map(
lambda lowercase__ : tokenizer.batch_encode_plus(
(example[features_name[0]], example[features_name[1]]) , truncation=lowercase__ , max_length=lowercase__ , padding='max_length' , ) , batched=lowercase__ , )
def gen_train():
for ex in transformed_ds[datasets.Split.TRAIN]:
__lowercase= {k: v for k, v in ex.items() if k in input_names}
__lowercase= labelaid[ex[label_name]]
yield (d, label)
def gen_val():
for ex in transformed_ds[datasets.Split.VALIDATION]:
__lowercase= {k: v for k, v in ex.items() if k in input_names}
__lowercase= labelaid[ex[label_name]]
yield (d, label)
def gen_test():
for ex in transformed_ds[datasets.Split.TEST]:
__lowercase= {k: v for k, v in ex.items() if k in input_names}
__lowercase= labelaid[ex[label_name]]
yield (d, label)
__lowercase= (
tf.data.Dataset.from_generator(
lowercase__ , ({k: tf.intaa for k in input_names}, tf.intaa) , ({k: tf.TensorShape([None] ) for k in input_names}, tf.TensorShape([] )) , )
if datasets.Split.TRAIN in transformed_ds
else None
)
if train_ds is not None:
__lowercase= train_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.TRAIN] ) ) )
__lowercase= (
tf.data.Dataset.from_generator(
lowercase__ , ({k: tf.intaa for k in input_names}, tf.intaa) , ({k: tf.TensorShape([None] ) for k in input_names}, tf.TensorShape([] )) , )
if datasets.Split.VALIDATION in transformed_ds
else None
)
if val_ds is not None:
__lowercase= val_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.VALIDATION] ) ) )
__lowercase= (
tf.data.Dataset.from_generator(
lowercase__ , ({k: tf.intaa for k in input_names}, tf.intaa) , ({k: tf.TensorShape([None] ) for k in input_names}, tf.TensorShape([] )) , )
if datasets.Split.TEST in transformed_ds
else None
)
if test_ds is not None:
__lowercase= test_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.TEST] ) ) )
return train_ds, val_ds, test_ds, labelaid
logger = logging.getLogger(__name__)
@dataclass
class A :
UpperCamelCase_ : int =field(metadata={'''help''': '''Which column contains the label'''} )
    UpperCamelCase_ : str =field(default=None , metadata={'''help''': '''The path of the training file'''} )
    UpperCamelCase_ : Optional[str] =field(default=None , metadata={'''help''': '''The path of the development file'''} )
    UpperCamelCase_ : Optional[str] =field(default=None , metadata={'''help''': '''The path of the test file'''} )
UpperCamelCase_ : int =field(
default=128 , metadata={
'''help''': (
'''The maximum total input sequence length after tokenization. Sequences longer '''
'''than this will be truncated, sequences shorter will be padded.'''
)
} , )
    UpperCamelCase_ : bool =field(
        default=False , metadata={'''help''': '''Overwrite the cached training and evaluation sets'''} )
@dataclass
class A :
UpperCamelCase_ : str =field(
metadata={'''help''': '''Path to pretrained model or model identifier from huggingface.co/models'''} )
    UpperCamelCase_ : Optional[str] =field(
        default=None , metadata={'''help''': '''Pretrained config name or path if not the same as model_name'''} )
    UpperCamelCase_ : Optional[str] =field(
        default=None , metadata={'''help''': '''Pretrained tokenizer name or path if not the same as model_name'''} )
    UpperCamelCase_ : bool =field(default=False , metadata={'''help''': '''Set this flag to use fast tokenization.'''} )
# If you want to tweak more attributes on your tokenizer, you should do it in a distinct script,
# or just modify its tokenizer_config.json.
    UpperCamelCase_ : Optional[str] =field(
        default=None , metadata={'''help''': '''Where do you want to store the pretrained models downloaded from huggingface.co'''} , )
def _lowerCamelCase( ) -> Optional[Any]:
'''simple docstring'''
__lowercase= HfArgumentParser((ModelArguments, DataTrainingArguments, TFTrainingArguments) )
__lowercase, __lowercase, __lowercase= parser.parse_args_into_dataclasses()
if (
os.path.exists(training_args.output_dir )
and os.listdir(training_args.output_dir )
and training_args.do_train
and not training_args.overwrite_output_dir
):
raise ValueError(
F'Output directory ({training_args.output_dir}) already exists and is not empty. Use'
' --overwrite_output_dir to overcome.' )
# Setup logging
logging.basicConfig(
format='%(asctime)s - %(levelname)s - %(name)s - %(message)s' , datefmt='%m/%d/%Y %H:%M:%S' , level=logging.INFO , )
logger.info(
F'n_replicas: {training_args.n_replicas}, distributed training: {bool(training_args.n_replicas > 1 )}, '
F'16-bits training: {training_args.fpaa}' )
logger.info(F'Training/evaluation parameters {training_args}' )
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
__lowercase= AutoTokenizer.from_pretrained(
model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , )
__lowercase, __lowercase, __lowercase, __lowercase= get_tfds(
train_file=data_args.train_file , eval_file=data_args.dev_file , test_file=data_args.test_file , tokenizer=lowercase__ , label_column_id=data_args.label_column_id , max_seq_length=data_args.max_seq_length , )
__lowercase= AutoConfig.from_pretrained(
model_args.config_name if model_args.config_name else model_args.model_name_or_path , num_labels=len(lowercase__ ) , labelaid=lowercase__ , idalabel={id: label for label, id in labelaid.items()} , finetuning_task='text-classification' , cache_dir=model_args.cache_dir , )
with training_args.strategy.scope():
__lowercase= TFAutoModelForSequenceClassification.from_pretrained(
model_args.model_name_or_path , from_pt=bool('.bin' in model_args.model_name_or_path ) , config=lowercase__ , cache_dir=model_args.cache_dir , )
def compute_metrics(lowercase__ ) -> Dict:
__lowercase= np.argmax(p.predictions , axis=1 )
return {"acc": (preds == p.label_ids).mean()}
# Initialize our Trainer
__lowercase= TFTrainer(
model=lowercase__ , args=lowercase__ , train_dataset=lowercase__ , eval_dataset=lowercase__ , compute_metrics=lowercase__ , )
# Training
if training_args.do_train:
trainer.train()
trainer.save_model()
tokenizer.save_pretrained(training_args.output_dir )
# Evaluation
__lowercase= {}
if training_args.do_eval:
logger.info('*** Evaluate ***' )
__lowercase= trainer.evaluate()
__lowercase= os.path.join(training_args.output_dir , 'eval_results.txt' )
with open(lowercase__ , 'w' ) as writer:
logger.info('***** Eval results *****' )
for key, value in result.items():
logger.info(F' {key} = {value}' )
writer.write(F'{key} = {value}\n' )
results.update(lowercase__ )
return results
if __name__ == "__main__":
main()
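    # Sketch of the expected input (an addition): each of --train_file/--dev_file/
    # --test_file is a headered CSV whose columns become dataset features; the column
    # selected by --label_column_id holds the labels, e.g.
    #
    #   sentence,label
    #   "the movie was great",positive
    #   "the plot made no sense",negative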
| 304
| 0
|
import io
import os
import unicodedata
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)
SPIECE_UNDERLINE = '''▁'''
VOCAB_FILES_NAMES = {'''vocab_file''': '''vocab.txt''', '''sentencepiece_model_ckpt''': '''sentencepiece.bpe.model'''}
RESOURCE_FILES_NAMES = {
'''sentencepiece_model_file''': '''sentencepiece.bpe.model''',
'''vocab_file''': '''vocab.txt''',
}
PRETRAINED_VOCAB_FILES_MAP = {
'''vocab_file''': {
'''ernie-m-base''': '''https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/vocab.txt''',
'''ernie-m-large''': '''https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/vocab.txt''',
},
'''sentencepiece_model_file''': {
'''ernie-m-base''': '''https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/sentencepiece.bpe.model''',
'''ernie-m-large''': '''https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/sentencepiece.bpe.model''',
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'''ernie-m-base''': 5_1_4,
'''ernie-m-large''': 5_1_4,
}
PRETRAINED_INIT_CONFIGURATION = {
'''ernie-m-base''': {'''do_lower_case''': False},
'''ernie-m-large''': {'''do_lower_case''': False},
}
class A ( PreTrainedTokenizer ):
UpperCamelCase_ : List[str] =["input_ids"]
UpperCamelCase_ : Dict =VOCAB_FILES_NAMES
UpperCamelCase_ : List[str] =PRETRAINED_INIT_CONFIGURATION
UpperCamelCase_ : str =PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
UpperCamelCase_ : str =PRETRAINED_VOCAB_FILES_MAP
UpperCamelCase_ : Tuple =RESOURCE_FILES_NAMES
def __init__(self , lowerCAmelCase , lowerCAmelCase=None , lowerCAmelCase=False , lowerCAmelCase="utf8" , lowerCAmelCase="[UNK]" , lowerCAmelCase="[SEP]" , lowerCAmelCase="[PAD]" , lowerCAmelCase="[CLS]" , lowerCAmelCase="[MASK]" , lowerCAmelCase = None , **lowerCAmelCase , ):
__lowercase= {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
do_lower_case=UpperCamelCase__ , unk_token=UpperCamelCase__ , sep_token=UpperCamelCase__ , pad_token=UpperCamelCase__ , cls_token=UpperCamelCase__ , mask_token=UpperCamelCase__ , vocab_file=UpperCamelCase__ , encoding=UpperCamelCase__ , sp_model_kwargs=self.sp_model_kwargs , **UpperCamelCase__ , )
__lowercase= do_lower_case
__lowercase= sentencepiece_model_ckpt
__lowercase= spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(UpperCamelCase__ )
# to mimic paddlenlp.transformers.ernie_m.tokenizer.ErnieMTokenizer functioning
if vocab_file is not None:
__lowercase= self.load_vocab(filepath=UpperCamelCase__ )
else:
__lowercase= {self.sp_model.id_to_piece(UpperCamelCase__ ): id for id in range(self.sp_model.get_piece_size() )}
__lowercase= {v: k for k, v in self.vocab.items()}
    def _A (self , text ):
        if text is None:
            return None
        split_tokens = self.tokenize(text )
        normalized_text, char_mapping = '', []
        for i, ch in enumerate(text ):
            if ch in self.SP_CHAR_MAPPING:
                ch = self.SP_CHAR_MAPPING.get(ch )
            else:
                ch = unicodedata.normalize('NFKC' , ch )
            if self.is_whitespace(ch ):
                continue
            normalized_text += ch
            char_mapping.extend([i] * len(ch ) )
        text, token_mapping, offset = normalized_text, [], 0
        if self.do_lower_case:
            text = text.lower()
        for token in split_tokens:
            if token[:1] == "▁":
                token = token[1:]
            start = text[offset:].index(token ) + offset
            end = start + len(token )
            token_mapping.append((char_mapping[start], char_mapping[end - 1] + 1) )
            offset = end
        return token_mapping
@property
def _A (self ):
return len(self.vocab )
def _A (self ):
return dict(self.vocab , **self.added_tokens_encoder )
def __getstate__(self ):
__lowercase= self.__dict__.copy()
__lowercase= None
return state
def __setstate__(self , lowerCAmelCase ):
__lowercase= d
# for backward compatibility
if not hasattr(self , 'sp_model_kwargs' ):
__lowercase= {}
__lowercase= spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.sentencepiece_model_ckpt )
def _A (self , lowerCAmelCase ):
return "".join((self.SP_CHAR_MAPPING.get(UpperCamelCase__ , UpperCamelCase__ ) for c in text) )
def _A (self , lowerCAmelCase , lowerCAmelCase=False , lowerCAmelCase=6_4 , lowerCAmelCase=0.1 ):
if self.sp_model_kwargs.get('enable_sampling' ) is True:
__lowercase= True
if self.sp_model_kwargs.get('alpha' ) is not None:
__lowercase= self.sp_model_kwargs.get('alpha' )
if self.sp_model_kwargs.get('nbest_size' ) is not None:
__lowercase= self.sp_model_kwargs.get('nbest_size' )
if not enable_sampling:
__lowercase= self.sp_model.EncodeAsPieces(UpperCamelCase__ )
else:
__lowercase= self.sp_model.SampleEncodeAsPieces(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
__lowercase= []
for pi, piece in enumerate(UpperCamelCase__ ):
if piece == SPIECE_UNDERLINE:
if not pieces[pi + 1].startswith(UpperCamelCase__ ) and pi != 0:
new_pieces.append(UpperCamelCase__ )
continue
else:
continue
__lowercase= 0
for i, chunk in enumerate(UpperCamelCase__ ):
if chunk == SPIECE_UNDERLINE:
continue
if self.is_ch_char(UpperCamelCase__ ) or self.is_punct(UpperCamelCase__ ):
if i > lst_i and piece[lst_i:i] != SPIECE_UNDERLINE:
new_pieces.append(piece[lst_i:i] )
new_pieces.append(UpperCamelCase__ )
__lowercase= i + 1
elif chunk.isdigit() and i > 0 and not piece[i - 1].isdigit():
if i > lst_i and piece[lst_i:i] != SPIECE_UNDERLINE:
new_pieces.append(piece[lst_i:i] )
__lowercase= i
elif not chunk.isdigit() and i > 0 and piece[i - 1].isdigit():
if i > lst_i and piece[lst_i:i] != SPIECE_UNDERLINE:
new_pieces.append(piece[lst_i:i] )
__lowercase= i
if len(UpperCamelCase__ ) > lst_i:
new_pieces.append(piece[lst_i:] )
return new_pieces
def _A (self , lowerCAmelCase ):
__lowercase= "".join(UpperCamelCase__ ).replace(UpperCamelCase__ , ' ' ).strip()
return out_string
def _A (self , lowerCAmelCase ):
__lowercase= self.convert_ids_to_tokens(UpperCamelCase__ )
__lowercase= "".join(UpperCamelCase__ ).replace(UpperCamelCase__ , ' ' ).strip()
return out_string
def _A (self , lowerCAmelCase ):
return self.vocab.get(UpperCamelCase__ , self.vocab.get(self.unk_token ) )
def _A (self , lowerCAmelCase ):
return self.reverse_vocab.get(UpperCamelCase__ , self.unk_token )
def _A (self , lowerCAmelCase , lowerCAmelCase=None ):
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
__lowercase= [self.cls_token_id]
__lowercase= [self.sep_token_id]
return _cls + token_ids_a + _sep + _sep + token_ids_a + _sep
def _A (self , lowerCAmelCase , lowerCAmelCase=None ):
if offset_mapping_a is None:
return [(0, 0)] + offset_mapping_a + [(0, 0)]
return [(0, 0)] + offset_mapping_a + [(0, 0), (0, 0)] + offset_mapping_a + [(0, 0)]
def _A (self , lowerCAmelCase , lowerCAmelCase=None , lowerCAmelCase=False ):
if already_has_special_tokens:
if token_ids_a is not None:
raise ValueError(
'You should not supply a second sequence if the provided sequence of '
'ids is already formatted with special tokens for the model.' )
return [1 if x in [self.sep_token_id, self.cls_token_id] else 0 for x in token_ids_a]
if token_ids_a is not None:
return [1] + ([0] * len(UpperCamelCase__ )) + [1, 1] + ([0] * len(UpperCamelCase__ )) + [1]
return [1] + ([0] * len(UpperCamelCase__ )) + [1]
def _A (self , lowerCAmelCase , lowerCAmelCase = None ):
if token_ids_a is None:
# [CLS] X [SEP]
return (len(UpperCamelCase__ ) + 2) * [0]
# [CLS] A [SEP] [SEP] B [SEP]
return [0] * (len(UpperCamelCase__ ) + 1) + [1] * (len(UpperCamelCase__ ) + 3)
def _A (self , lowerCAmelCase ):
if "\u4e00" <= char <= "\u9fff":
return True
return False
def _A (self , lowerCAmelCase ):
if ("a" <= char <= "z") or ("A" <= char <= "Z"):
return True
return False
def _A (self , lowerCAmelCase ):
if char in ",;:.?!~,;:。?!《》【】":
return True
return False
def _A (self , lowerCAmelCase ):
if char == " " or char == "\t" or char == "\n" or char == "\r":
return True
if len(UpperCamelCase__ ) == 1:
__lowercase= unicodedata.category(UpperCamelCase__ )
if cat == "Zs":
return True
return False
    def _A (self , filepath ):
        token_to_idx = {}
        with io.open(filepath , 'r' , encoding='utf-8' ) as f:
            for index, line in enumerate(f ):
                token = line.rstrip('\n' )
                token_to_idx[token] = int(index )
        return token_to_idx
    def _A (self , save_directory , filename_prefix = None ):
        index = 0
        if os.path.isdir(save_directory ):
            vocab_file = os.path.join(
                save_directory , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
        else:
            vocab_file = (filename_prefix + '-' if filename_prefix else '') + save_directory
        with open(vocab_file , 'w' , encoding='utf-8' ) as writer:
            for token, token_index in sorted(self.vocab.items() , key=lambda kv : kv[1] ):
                if index != token_index:
                    logger.warning(
                        f'Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive.'
                        ' Please check that the vocabulary is not corrupted!' )
                    index = token_index
                writer.write(token + '\n' )
                index += 1
        tokenizer_model_file = os.path.join(save_directory , 'sentencepiece.bpe.model' )
        with open(tokenizer_model_file , 'wb' ) as fi:
            content_spiece_model = self.sp_model.serialized_model_proto()
            fi.write(content_spiece_model )
        return (vocab_file,)
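# Usage sketch (an addition; the class/checkpoint names mirror the vocab URLs above
# and are assumptions, not verified calls):
#
#   tokenizer = ErnieMTokenizer.from_pretrained('susnato/ernie-m-base_pytorch')
#   offsets = tokenizer.get_offset_mapping('Hello world')  # hypothetical helper name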
| 370
|
import inspect
import unittest
from math import floor
from transformers import CvtConfig
from transformers.file_utils import cached_property, is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import CvtForImageClassification, CvtModel
from transformers.models.cvt.modeling_cvt import CVT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class A ( ConfigTester ):
def _A (self ):
__lowercase= self.config_class(**self.inputs_dict )
self.parent.assertTrue(hasattr(lowerCAmelCase , 'embed_dim' ) )
self.parent.assertTrue(hasattr(lowerCAmelCase , 'num_heads' ) )
class A :
def __init__(self , lowerCAmelCase , lowerCAmelCase=1_3 , lowerCAmelCase=6_4 , lowerCAmelCase=3 , lowerCAmelCase=[1_6, 4_8, 9_6] , lowerCAmelCase=[1, 3, 6] , lowerCAmelCase=[1, 2, 1_0] , lowerCAmelCase=[7, 3, 3] , lowerCAmelCase=[4, 2, 2] , lowerCAmelCase=[2, 1, 1] , lowerCAmelCase=[2, 2, 2] , lowerCAmelCase=[False, False, True] , lowerCAmelCase=[0.0, 0.0, 0.0] , lowerCAmelCase=0.02 , lowerCAmelCase=1E-12 , lowerCAmelCase=True , lowerCAmelCase=True , lowerCAmelCase=2 , ):
__lowercase= parent
__lowercase= batch_size
__lowercase= image_size
__lowercase= patch_sizes
__lowercase= patch_stride
__lowercase= patch_padding
__lowercase= is_training
__lowercase= use_labels
__lowercase= num_labels
__lowercase= num_channels
__lowercase= embed_dim
__lowercase= num_heads
__lowercase= stride_kv
__lowercase= depth
__lowercase= cls_token
__lowercase= attention_drop_rate
__lowercase= initializer_range
__lowercase= layer_norm_eps
def _A (self ):
__lowercase= floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
__lowercase= None
if self.use_labels:
__lowercase= ids_tensor([self.batch_size] , self.num_labels )
__lowercase= self.get_config()
return config, pixel_values, labels
def _A (self ):
return CvtConfig(
image_size=self.image_size , num_labels=self.num_labels , num_channels=self.num_channels , embed_dim=self.embed_dim , num_heads=self.num_heads , patch_sizes=self.patch_sizes , patch_padding=self.patch_padding , patch_stride=self.patch_stride , stride_kv=self.stride_kv , depth=self.depth , cls_token=self.cls_token , attention_drop_rate=self.attention_drop_rate , initializer_range=self.initializer_range , )
def _A (self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase ):
__lowercase= CvtModel(config=lowerCAmelCase )
model.to(lowerCAmelCase )
model.eval()
__lowercase= model(lowerCAmelCase )
__lowercase= (self.image_size, self.image_size)
__lowercase, __lowercase= image_size[0], image_size[1]
for i in range(len(self.depth ) ):
__lowercase= floor(((height + 2 * self.patch_padding[i] - self.patch_sizes[i]) / self.patch_stride[i]) + 1 )
__lowercase= floor(((width + 2 * self.patch_padding[i] - self.patch_sizes[i]) / self.patch_stride[i]) + 1 )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.embed_dim[-1], height, width) )
def _A (self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase ):
__lowercase= self.num_labels
__lowercase= CvtForImageClassification(lowerCAmelCase )
model.to(lowerCAmelCase )
model.eval()
__lowercase= model(lowerCAmelCase , labels=lowerCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def _A (self ):
__lowercase= self.prepare_config_and_inputs()
__lowercase, __lowercase, __lowercase= config_and_inputs
__lowercase= {'pixel_values': pixel_values}
return config, inputs_dict
@require_torch
class A ( ModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
UpperCamelCase_ : Optional[int] =(CvtModel, CvtForImageClassification) if is_torch_available() else ()
UpperCamelCase_ : List[str] =(
{'''feature-extraction''': CvtModel, '''image-classification''': CvtForImageClassification}
if is_torch_available()
else {}
)
UpperCamelCase_ : str =False
UpperCamelCase_ : List[Any] =False
UpperCamelCase_ : Any =False
UpperCamelCase_ : Union[str, Any] =False
UpperCamelCase_ : Tuple =False
def _A (self ):
__lowercase= CvtModelTester(self )
__lowercase= ConfigTester(self , config_class=lowerCAmelCase , has_text_modality=lowerCAmelCase , hidden_size=3_7 )
def _A (self ):
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def _A (self ):
return
@unittest.skip(reason='Cvt does not output attentions' )
def _A (self ):
pass
@unittest.skip(reason='Cvt does not use inputs_embeds' )
def _A (self ):
pass
@unittest.skip(reason='Cvt does not support input and output embeddings' )
def _A (self ):
pass
def _A (self ):
__lowercase, __lowercase= self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__lowercase= model_class(lowerCAmelCase )
__lowercase= inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__lowercase= [*signature.parameters.keys()]
__lowercase= ['pixel_values']
self.assertListEqual(arg_names[:1] , lowerCAmelCase )
def _A (self ):
__lowercase= self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowerCAmelCase )
def _A (self ):
def check_hidden_states_output(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase ):
__lowercase= model_class(lowerCAmelCase )
model.to(lowerCAmelCase )
model.eval()
with torch.no_grad():
__lowercase= model(**self._prepare_for_class(lowerCAmelCase , lowerCAmelCase ) )
__lowercase= outputs.hidden_states
__lowercase= len(self.model_tester.depth )
self.assertEqual(len(lowerCAmelCase ) , lowerCAmelCase )
# verify the first hidden states (first block)
self.assertListEqual(
list(hidden_states[0].shape[-3:] ) , [
self.model_tester.embed_dim[0],
self.model_tester.image_size // 4,
self.model_tester.image_size // 4,
] , )
__lowercase, __lowercase= self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__lowercase= True
check_hidden_states_output(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
__lowercase= True
check_hidden_states_output(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase )
def _A (self ):
__lowercase= self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*lowerCAmelCase )
@unittest.skip('Will be fixed soon by reducing the size of the model used for common tests.' )
def _A (self ):
pass
@slow
def _A (self ):
for model_name in CVT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__lowercase= CvtModel.from_pretrained(lowerCAmelCase )
self.assertIsNotNone(lowerCAmelCase )
def _lowerCamelCase( ) -> Optional[int]:
'''simple docstring'''
__lowercase= Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
return image
@require_torch
@require_vision
class A ( unittest.TestCase ):
@cached_property
def _A (self ):
return AutoImageProcessor.from_pretrained(CVT_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
@slow
def _A (self ):
__lowercase= CvtForImageClassification.from_pretrained(CVT_PRETRAINED_MODEL_ARCHIVE_LIST[0] ).to(lowerCAmelCase )
__lowercase= self.default_image_processor
__lowercase= prepare_img()
__lowercase= image_processor(images=lowerCAmelCase , return_tensors='pt' ).to(lowerCAmelCase )
# forward pass
with torch.no_grad():
__lowercase= model(**lowerCAmelCase )
# verify the logits
__lowercase= torch.Size((1, 1_0_0_0) )
self.assertEqual(outputs.logits.shape , lowerCAmelCase )
__lowercase= torch.tensor([0.92_85, 0.90_15, -0.31_50] ).to(lowerCAmelCase )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , lowerCAmelCase , atol=1E-4 ) )
| 304
| 0
|
import argparse
import os
import re
PATH_TO_TRANSFORMERS = '''src/transformers'''
# Pattern that looks at the indentation in a line.
_re_indent = re.compile(R'''^(\s*)\S''')
# Pattern that matches `"key":" and puts `key` in group 0.
_re_direct_key = re.compile(R'''^\s*"([^"]+)":''')
# Pattern that matches `_import_structure["key"]` and puts `key` in group 0.
_re_indirect_key = re.compile(R'''^\s*_import_structure\["([^"]+)"\]''')
# Pattern that matches `"key",` and puts `key` in group 0.
_re_strip_line = re.compile(R'''^\s*"([^"]+)",\s*$''')
# Pattern that matches any `[stuff]` and puts `stuff` in group 0.
_re_bracket_content = re.compile(R'''\[([^\]]+)\]''')
def get_indent(line ):
    '''simple docstring'''
    search = _re_indent.search(line )
    return "" if search is None else search.groups()[0]
def split_code_in_indented_blocks(code , indent_level="" , start_prompt=None , end_prompt=None ):
    '''simple docstring'''
    index = 0
    lines = code.split('\n' )
    if start_prompt is not None:
        while not lines[index].startswith(start_prompt ):
            index += 1
        blocks = ['\n'.join(lines[:index] )]
    else:
        blocks = []
    # We split into blocks until we get to the `end_prompt` (or the end of the block).
    current_block = [lines[index]]
    index += 1
    while index < len(lines ) and (end_prompt is None or not lines[index].startswith(end_prompt )):
        if len(lines[index] ) > 0 and get_indent(lines[index] ) == indent_level:
            if len(current_block ) > 0 and get_indent(current_block[-1] ).startswith(indent_level + ' ' ):
                current_block.append(lines[index] )
                blocks.append('\n'.join(current_block ) )
                if index < len(lines ) - 1:
                    current_block = [lines[index + 1]]
                    index += 1
                else:
                    current_block = []
            else:
                blocks.append('\n'.join(current_block ) )
                current_block = [lines[index]]
        else:
            current_block.append(lines[index] )
        index += 1
    # Adds current block if it's nonempty.
    if len(current_block ) > 0:
        blocks.append('\n'.join(current_block ) )
    # Add final block after end_prompt if provided.
    if end_prompt is not None and index < len(lines ):
        blocks.append('\n'.join(lines[index:] ) )
    return blocks
def ignore_underscore(key ):
    '''simple docstring'''
    def _inner(x ):
        return key(x ).lower().replace('_' , '' )
    return _inner
def sort_objects(objects , key=None ):
    '''simple docstring'''
    def noop(x ):
        return x
    if key is None:
        key = noop
    # Constants are all uppercase, they go first.
    constants = [obj for obj in objects if key(obj ).isupper()]
    # Classes are not all uppercase but start with a capital, they go second.
    classes = [obj for obj in objects if key(obj )[0].isupper() and not key(obj ).isupper()]
    # Functions begin with a lowercase, they go last.
    functions = [obj for obj in objects if not key(obj )[0].isupper()]
    key1 = ignore_underscore(key )
    return sorted(constants , key=key1 ) + sorted(classes , key=key1 ) + sorted(functions , key=key1 )
def sort_objects_in_import(import_statement ):
    '''simple docstring'''
    def _replace(match ):
        imports = match.groups()[0]
        if "," not in imports:
            return F'[{imports}]'
        keys = [part.strip().replace('"' , '' ) for part in imports.split(',' )]
        # We will have a final empty element if the line finished with a comma.
        if len(keys[-1] ) == 0:
            keys = keys[:-1]
        return "[" + ", ".join([F'"{k}"' for k in sort_objects(keys )] ) + "]"
    lines = import_statement.split('\n' )
    if len(lines ) > 3:
        # Here we have to sort internal imports that are on several lines (one per name):
        # key: [
        #     "object1",
        #     "object2",
        #     ...
        # ]
        # We may have to ignore one or two lines on each side.
        idx = 2 if lines[1].strip() == '[' else 1
        keys_to_sort = [(i, _re_strip_line.search(line ).groups()[0]) for i, line in enumerate(lines[idx:-idx] )]
        sorted_indices = sort_objects(keys_to_sort , key=lambda x : x[1] )
        sorted_lines = [lines[x[0] + idx] for x in sorted_indices]
        return "\n".join(lines[:idx] + sorted_lines + lines[-idx:] )
    elif len(lines ) == 3:
        # Here we have to sort internal imports that are on one separate line:
        # key: [
        #     "object1", "object2", ...
        # ]
        if _re_bracket_content.search(lines[1] ) is not None:
            lines[1] = _re_bracket_content.sub(_replace , lines[1] )
        else:
            keys = [part.strip().replace('"' , '' ) for part in lines[1].split(',' )]
            # We will have a final empty element if the line finished with a comma.
            if len(keys[-1] ) == 0:
                keys = keys[:-1]
            lines[1] = get_indent(lines[1] ) + ', '.join([F'"{k}"' for k in sort_objects(keys )] )
        return "\n".join(lines )
    else:
        # Finally we have to deal with imports fitting on one line
        import_statement = _re_bracket_content.sub(_replace , import_statement )
        return import_statement
def sort_imports(file , check_only=True ):
    '''simple docstring'''
    with open(file , encoding='utf-8' ) as f:
        code = f.read()
    if "_import_structure" not in code:
        return
    # Blocks of indent level 0
    main_blocks = split_code_in_indented_blocks(
        code , start_prompt='_import_structure = {' , end_prompt='if TYPE_CHECKING:' )
    # We ignore block 0 (everything until start_prompt) and the last block (everything after end_prompt).
    for block_idx in range(1 , len(main_blocks ) - 1 ):
        # Check if the block contains some `_import_structure`s thingy to sort.
        block = main_blocks[block_idx]
        block_lines = block.split('\n' )
        # Get to the start of the imports.
        line_idx = 0
        while line_idx < len(block_lines ) and "_import_structure" not in block_lines[line_idx]:
            # Skip dummy import blocks
            if "import dummy" in block_lines[line_idx]:
                line_idx = len(block_lines )
            else:
                line_idx += 1
        if line_idx >= len(block_lines ):
            continue
        # Ignore beginning and last line: they don't contain anything.
        internal_block_code = '\n'.join(block_lines[line_idx:-1] )
        indent = get_indent(block_lines[1] )
        # Split the internal block into blocks of indent level 1.
        internal_blocks = split_code_in_indented_blocks(internal_block_code , indent_level=indent )
        # We have two categories of import key: list or _import_structure[key].append/extend
        pattern = _re_direct_key if '_import_structure = {' in block_lines[0] else _re_indirect_key
        # Grab the keys, but there is a trap: some lines are empty or just comments.
        keys = [(pattern.search(b ).groups()[0] if pattern.search(b ) is not None else None) for b in internal_blocks]
        # We only sort the lines with a key.
        keys_to_sort = [(i, key) for i, key in enumerate(keys ) if key is not None]
        sorted_indices = [x[0] for x in sorted(keys_to_sort , key=lambda x : x[1] )]
        # We reorder the blocks by leaving empty lines/comments as they were and reorder the rest.
        count = 0
        reorderded_blocks = []
        for i in range(len(internal_blocks ) ):
            if keys[i] is None:
                reorderded_blocks.append(internal_blocks[i] )
            else:
                block = sort_objects_in_import(internal_blocks[sorted_indices[count]] )
                reorderded_blocks.append(block )
                count += 1
        # And we put our main block back together with its first and last line.
        main_blocks[block_idx] = '\n'.join(block_lines[:line_idx] + reorderded_blocks + [block_lines[-1]] )
    if code != "\n".join(main_blocks ):
        if check_only:
            return True
        else:
            print(F'Overwriting {file}.' )
            with open(file , 'w' , encoding='utf-8' ) as f:
                f.write('\n'.join(main_blocks ) )
def sort_imports_in_all_inits(check_only=True ):
    '''simple docstring'''
    failures = []
    for root, _, files in os.walk(PATH_TO_TRANSFORMERS ):
        if "__init__.py" in files:
            result = sort_imports(os.path.join(root , '__init__.py' ) , check_only=check_only )
            if result:
                failures = [os.path.join(root , '__init__.py' )]
    if len(failures ) > 0:
        raise ValueError(F'Would overwrite {len(failures )} files, run `make style`.' )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument('''--check_only''', action='''store_true''', help='''Whether to only check or fix style.''')
    args = parser.parse_args()
sort_imports_in_all_inits(check_only=args.check_only)
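    # Illustrative expectation (an addition): sort_objects orders constants, then
    # classes, then functions, each alphabetically while ignoring underscores:
    #
    #   sort_objects(['my_func', 'MyClass', 'MY_CONSTANT', '_private'])
    #   # -> ['MY_CONSTANT', 'MyClass', 'my_func', '_private']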
| 371
|
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {'''configuration_mra''': ['''MRA_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''MraConfig''']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_mra'''] = [
'''MRA_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''MraForMaskedLM''',
'''MraForMultipleChoice''',
'''MraForQuestionAnswering''',
'''MraForSequenceClassification''',
'''MraForTokenClassification''',
'''MraLayer''',
'''MraModel''',
'''MraPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_mra import MRA_PRETRAINED_CONFIG_ARCHIVE_MAP, MraConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mra import (
MRA_PRETRAINED_MODEL_ARCHIVE_LIST,
MraForMaskedLM,
MraForMultipleChoice,
MraForQuestionAnswering,
MraForSequenceClassification,
MraForTokenClassification,
MraLayer,
MraModel,
MraPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure)
| 304
| 0
|
from __future__ import annotations
from collections.abc import Iterable, Iterator
from dataclasses import dataclass
test_data_odd = (3, 9, -1_1, 0, 7, 5, 1, -1)
test_data_even = (4, 6, 2, 0, 8, 1_0, 3, -2)
@dataclass
class Node :
    data : int
    next_node : Node | None
class SortedLinkedList :
    def __init__(self , ints ):
        self.head = None
        for i in sorted(ints , reverse=True ):
            self.head = Node(i , self.head )
    def __iter__(self ):
        node = self.head
        while node:
            yield node.data
            node = node.next_node
    def __len__(self ):
        return sum(1 for _ in self )
    def __str__(self ):
        return " -> ".join([str(node ) for node in self] )
def merge_lists(sll_one , sll_two ) -> SortedLinkedList:
    '''simple docstring'''
    return SortedLinkedList(list(sll_one ) + list(sll_two ) )
if __name__ == "__main__":
    import doctest
    doctest.testmod()
    SSL = SortedLinkedList
    print(merge_lists(SSL(test_data_odd), SSL(test_data_even)))
| 350
|
import os
import re
import shutil
from argparse import ArgumentParser, Namespace
from datasets.commands import BaseDatasetsCLICommand
from datasets.utils.logging import get_logger
lowerCAmelCase = '''<<<<<<< This should probably be modified because it mentions: '''
lowerCAmelCase = '''=======
>>>>>>>
'''
lowerCAmelCase = [
'''TextEncoderConfig''',
'''ByteTextEncoder''',
'''SubwordTextEncoder''',
'''encoder_config''',
'''maybe_build_from_corpus''',
'''manual_dir''',
]
lowerCAmelCase = [
# (pattern, replacement)
# Order is important here for some replacements
(R'''tfds\.core''', R'''datasets'''),
(R'''tf\.io\.gfile\.GFile''', R'''open'''),
(R'''tf\.([\w\d]+)''', R'''datasets.Value(\'\1\')'''),
(R'''tfds\.features\.Text\(\)''', R'''datasets.Value(\'string\')'''),
(R'''tfds\.features\.Text\(''', R'''datasets.Value(\'string\'),'''),
(R'''features\s*=\s*tfds.features.FeaturesDict\(''', R'''features=datasets.Features('''),
(R'''tfds\.features\.FeaturesDict\(''', R'''dict('''),
(R'''The TensorFlow Datasets Authors''', R'''The TensorFlow Datasets Authors and the HuggingFace Datasets Authors'''),
(R'''tfds\.''', R'''datasets.'''),
(R'''dl_manager\.manual_dir''', R'''self.config.data_dir'''),
(R'''self\.builder_config''', R'''self.config'''),
]
def _lowerCamelCase( lowercase__ ) -> Optional[int]:
'''simple docstring'''
return ConvertCommand(args.tfds_path , args.datasets_directory )
class A ( A_ ):
@staticmethod
def _A (lowerCAmelCase ):
__lowercase= parser.add_parser(
'convert' , help='Convert a TensorFlow Datasets dataset to a HuggingFace Datasets dataset.' , )
train_parser.add_argument(
'--tfds_path' , type=lowerCAmelCase , required=lowerCAmelCase , help='Path to a TensorFlow Datasets folder to convert or a single tfds file to convert.' , )
train_parser.add_argument(
'--datasets_directory' , type=lowerCAmelCase , required=lowerCAmelCase , help='Path to the HuggingFace Datasets folder.' )
train_parser.set_defaults(func=lowerCAmelCase )
def __init__(self , lowerCAmelCase , lowerCAmelCase , *lowerCAmelCase ):
__lowercase= get_logger('datasets-cli/converting' )
__lowercase= tfds_path
__lowercase= datasets_directory
def _A (self ):
if os.path.isdir(self._tfds_path ):
__lowercase= os.path.abspath(self._tfds_path )
elif os.path.isfile(self._tfds_path ):
__lowercase= os.path.dirname(self._tfds_path )
else:
raise ValueError('--tfds_path is neither a directory nor a file. Please check path.' )
__lowercase= os.path.abspath(self._datasets_directory )
self._logger.info(f'Converting datasets from {abs_tfds_path} to {abs_datasets_path}' )
__lowercase= []
__lowercase= []
__lowercase= {}
if os.path.isdir(self._tfds_path ):
__lowercase= os.listdir(lowerCAmelCase )
else:
__lowercase= [os.path.basename(self._tfds_path )]
for f_name in file_names:
self._logger.info(f'Looking at file {f_name}' )
__lowercase= os.path.join(lowerCAmelCase , lowerCAmelCase )
__lowercase= os.path.join(lowerCAmelCase , lowerCAmelCase )
if not os.path.isfile(lowerCAmelCase ) or "__init__" in f_name or "_test" in f_name or ".py" not in f_name:
self._logger.info('Skipping file' )
continue
with open(lowerCAmelCase , encoding='utf-8' ) as f:
__lowercase= f.readlines()
__lowercase= []
__lowercase= False
__lowercase= False
__lowercase= []
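            # Rewrite the file line by line: drop TF-only imports, map tfds APIs to their
            # datasets equivalents, and flag lines that need manual review.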
for line in lines:
__lowercase= line
# Convert imports
if "import tensorflow.compat.v2 as tf" in out_line:
continue
elif "@tfds.core" in out_line:
continue
elif "builder=self" in out_line:
continue
elif "import tensorflow_datasets.public_api as tfds" in out_line:
__lowercase= 'import datasets\n'
elif "import tensorflow" in out_line:
# order is important here
__lowercase= ''
continue
elif "from absl import logging" in out_line:
__lowercase= 'from datasets import logging\n'
elif "getLogger" in out_line:
__lowercase= out_line.replace('getLogger' , 'get_logger' )
elif any(expression in out_line for expression in TO_HIGHLIGHT ):
__lowercase= True
                    __lowercase= list(filter(lambda e : e in out_line , lowerCAmelCase ) )
out_lines.append(HIGHLIGHT_MESSAGE_PRE + str(lowerCAmelCase ) + '\n' )
out_lines.append(lowerCAmelCase )
out_lines.append(lowerCAmelCase )
continue
else:
for pattern, replacement in TO_CONVERT:
__lowercase= re.sub(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase )
# Take care of saving utilities (to later move them together with main script)
if "tensorflow_datasets" in out_line:
__lowercase= re.match(r'from\stensorflow_datasets.*import\s([^\.\r\n]+)' , lowerCAmelCase )
tfds_imports.extend(imp.strip() for imp in match.group(1 ).split(',' ) )
__lowercase= 'from . import ' + match.group(1 )
                # Check we have not forgotten anything
if "tf." in out_line or "tfds." in out_line or "tensorflow_datasets" in out_line:
raise ValueError(f'Error converting {out_line.strip()}' )
if "GeneratorBasedBuilder" in out_line or "BeamBasedBuilder" in out_line:
__lowercase= True
out_lines.append(lowerCAmelCase )
if is_builder or "wmt" in f_name:
# We create a new directory for each dataset
__lowercase= f_name.replace('.py' , '' )
__lowercase= os.path.join(lowerCAmelCase , lowerCAmelCase )
__lowercase= os.path.join(lowerCAmelCase , lowerCAmelCase )
os.makedirs(lowerCAmelCase , exist_ok=lowerCAmelCase )
self._logger.info(f'Adding directory {output_dir}' )
imports_to_builder_map.update({imp: output_dir for imp in tfds_imports} )
else:
# Utilities will be moved at the end
utils_files.append(lowerCAmelCase )
if needs_manual_update:
with_manual_update.append(lowerCAmelCase )
with open(lowerCAmelCase , 'w' , encoding='utf-8' ) as f:
f.writelines(lowerCAmelCase )
self._logger.info(f'Converted in {output_file}' )
for utils_file in utils_files:
try:
__lowercase= os.path.basename(lowerCAmelCase )
__lowercase= imports_to_builder_map[f_name.replace('.py' , '' )]
self._logger.info(f'Moving {dest_folder} to {utils_file}' )
shutil.copy(lowerCAmelCase , lowerCAmelCase )
except KeyError:
self._logger.error(f'Cannot find destination folder for {utils_file}. Please copy manually.' )
if with_manual_update:
for file_path in with_manual_update:
self._logger.warning(
f'You need to manually update file {file_path} to remove configurations using \'TextEncoderConfig\'.' )
| 304
| 0
|
lowerCAmelCase = '''
# Transformers installation
! pip install transformers datasets
# To install from source instead of the last release, comment the command above and uncomment the following one.
# ! pip install git+https://github.com/huggingface/transformers.git
'''
lowerCAmelCase = [{'''type''': '''code''', '''content''': INSTALL_CONTENT}]
lowerCAmelCase = {
'''{processor_class}''': '''FakeProcessorClass''',
'''{model_class}''': '''FakeModelClass''',
'''{object_class}''': '''FakeObjectClass''',
}
| 351
|
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
lowerCAmelCase = {
'''albert-base-v1''': '''https://huggingface.co/albert-base-v1/resolve/main/config.json''',
'''albert-large-v1''': '''https://huggingface.co/albert-large-v1/resolve/main/config.json''',
'''albert-xlarge-v1''': '''https://huggingface.co/albert-xlarge-v1/resolve/main/config.json''',
'''albert-xxlarge-v1''': '''https://huggingface.co/albert-xxlarge-v1/resolve/main/config.json''',
'''albert-base-v2''': '''https://huggingface.co/albert-base-v2/resolve/main/config.json''',
'''albert-large-v2''': '''https://huggingface.co/albert-large-v2/resolve/main/config.json''',
'''albert-xlarge-v2''': '''https://huggingface.co/albert-xlarge-v2/resolve/main/config.json''',
'''albert-xxlarge-v2''': '''https://huggingface.co/albert-xxlarge-v2/resolve/main/config.json''',
}
class A ( A_ ):
UpperCamelCase_ : Optional[int] ='''albert'''
def __init__(self , lowerCAmelCase=3_0_0_0_0 , lowerCAmelCase=1_2_8 , lowerCAmelCase=4_0_9_6 , lowerCAmelCase=1_2 , lowerCAmelCase=1 , lowerCAmelCase=6_4 , lowerCAmelCase=1_6_3_8_4 , lowerCAmelCase=1 , lowerCAmelCase="gelu_new" , lowerCAmelCase=0 , lowerCAmelCase=0 , lowerCAmelCase=5_1_2 , lowerCAmelCase=2 , lowerCAmelCase=0.02 , lowerCAmelCase=1E-12 , lowerCAmelCase=0.1 , lowerCAmelCase="absolute" , lowerCAmelCase=0 , lowerCAmelCase=2 , lowerCAmelCase=3 , **lowerCAmelCase , ):
super().__init__(pad_token_id=lowerCAmelCase , bos_token_id=lowerCAmelCase , eos_token_id=lowerCAmelCase , **lowerCAmelCase )
__lowercase= vocab_size
__lowercase= embedding_size
__lowercase= hidden_size
__lowercase= num_hidden_layers
__lowercase= num_hidden_groups
__lowercase= num_attention_heads
__lowercase= inner_group_num
__lowercase= hidden_act
__lowercase= intermediate_size
__lowercase= hidden_dropout_prob
__lowercase= attention_probs_dropout_prob
__lowercase= max_position_embeddings
__lowercase= type_vocab_size
__lowercase= initializer_range
__lowercase= layer_norm_eps
__lowercase= classifier_dropout_prob
__lowercase= position_embedding_type
class A ( A_ ):
@property
def _A (self ):
if self.task == "multiple-choice":
__lowercase= {0: 'batch', 1: 'choice', 2: 'sequence'}
else:
__lowercase= {0: 'batch', 1: 'sequence'}
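        # Batch and sequence axes are dynamic so the exported ONNX graph accepts
        # variable batch sizes and sequence lengths.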
return OrderedDict(
[
('input_ids', dynamic_axis),
('attention_mask', dynamic_axis),
('token_type_ids', dynamic_axis),
] )
| 304
| 0
|
from dataclasses import asdict, dataclass
from typing import Optional
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCAmelCase = logging.get_logger(__name__)
# TODO Update this
lowerCAmelCase = {
'''facebook/esm-1b''': '''https://huggingface.co/facebook/esm-1b/resolve/main/config.json''',
# See all ESM models at https://huggingface.co/models?filter=esm
}
class A ( A_ ):
UpperCamelCase_ : Any ='''esm'''
def __init__(self , lowerCAmelCase=None , lowerCAmelCase=None , lowerCAmelCase=None , lowerCAmelCase=7_6_8 , lowerCAmelCase=1_2 , lowerCAmelCase=1_2 , lowerCAmelCase=3_0_7_2 , lowerCAmelCase=0.1 , lowerCAmelCase=0.1 , lowerCAmelCase=1_0_2_6 , lowerCAmelCase=0.02 , lowerCAmelCase=1E-12 , lowerCAmelCase="absolute" , lowerCAmelCase=True , lowerCAmelCase=None , lowerCAmelCase=False , lowerCAmelCase=False , lowerCAmelCase=None , lowerCAmelCase=None , **lowerCAmelCase , ):
super().__init__(pad_token_id=lowerCAmelCase , mask_token_id=lowerCAmelCase , **lowerCAmelCase )
__lowercase= vocab_size
__lowercase= hidden_size
__lowercase= num_hidden_layers
__lowercase= num_attention_heads
__lowercase= intermediate_size
__lowercase= hidden_dropout_prob
__lowercase= attention_probs_dropout_prob
__lowercase= max_position_embeddings
__lowercase= initializer_range
__lowercase= layer_norm_eps
__lowercase= position_embedding_type
__lowercase= use_cache
__lowercase= emb_layer_norm_before
__lowercase= token_dropout
__lowercase= is_folding_model
if is_folding_model:
if esmfold_config is None:
logger.info('No esmfold_config supplied for folding model, using default values.' )
__lowercase= EsmFoldConfig()
elif isinstance(lowerCAmelCase , lowerCAmelCase ):
__lowercase= EsmFoldConfig(**lowerCAmelCase )
__lowercase= esmfold_config
if vocab_list is None:
logger.warning('No vocab_list supplied for folding model, assuming the ESM-2 vocabulary!' )
__lowercase= get_default_vocab_list()
else:
__lowercase= vocab_list
else:
__lowercase= None
__lowercase= None
if self.esmfold_config is not None and getattr(self.esmfold_config , 'use_esm_attn_map' , lowerCAmelCase ):
raise ValueError('The HuggingFace port of ESMFold does not support use_esm_attn_map at this time!' )
def _A (self ):
__lowercase= super().to_dict()
if isinstance(self.esmfold_config , lowerCAmelCase ):
__lowercase= self.esmfold_config.to_dict()
return output
@dataclass
class A :
UpperCamelCase_ : str =None
UpperCamelCase_ : bool =True
UpperCamelCase_ : bool =False
UpperCamelCase_ : bool =False
UpperCamelCase_ : bool =False
UpperCamelCase_ : float =0
UpperCamelCase_ : bool =True
UpperCamelCase_ : bool =False
UpperCamelCase_ : int =128
UpperCamelCase_ : "TrunkConfig" =None
def _A (self ):
if self.trunk is None:
__lowercase= TrunkConfig()
elif isinstance(self.trunk , lowerCAmelCase ):
__lowercase= TrunkConfig(**self.trunk )
def _A (self ):
__lowercase= asdict(self )
__lowercase= self.trunk.to_dict()
return output
@dataclass
class A :
UpperCamelCase_ : int =48
UpperCamelCase_ : int =1_024
UpperCamelCase_ : int =128
UpperCamelCase_ : int =32
UpperCamelCase_ : int =32
UpperCamelCase_ : int =32
UpperCamelCase_ : float =0
UpperCamelCase_ : float =0
UpperCamelCase_ : bool =False
UpperCamelCase_ : int =4
UpperCamelCase_ : Optional[int] =128
UpperCamelCase_ : "StructureModuleConfig" =None
def _A (self ):
if self.structure_module is None:
__lowercase= StructureModuleConfig()
elif isinstance(self.structure_module , lowerCAmelCase ):
__lowercase= StructureModuleConfig(**self.structure_module )
if self.max_recycles <= 0:
raise ValueError(f'`max_recycles` should be positive, got {self.max_recycles}.' )
        if self.sequence_state_dim % self.sequence_head_width != 0:
            raise ValueError(
                '`sequence_state_dim` should be a round multiple of `sequence_head_width`, got'
                f' {self.sequence_state_dim} and {self.sequence_head_width}.' )
        if self.pairwise_state_dim % self.pairwise_head_width != 0:
            raise ValueError(
                '`pairwise_state_dim` should be a round multiple of `pairwise_head_width`, got'
                f' {self.pairwise_state_dim} and {self.pairwise_head_width}.' )
__lowercase= self.sequence_state_dim // self.sequence_head_width
__lowercase= self.pairwise_state_dim // self.pairwise_head_width
if self.sequence_state_dim != sequence_num_heads * self.sequence_head_width:
raise ValueError(
'`sequence_state_dim` should be equal to `sequence_num_heads * sequence_head_width, got'
f' {self.sequence_state_dim} != {sequence_num_heads} * {self.sequence_head_width}.' )
if self.pairwise_state_dim != pairwise_num_heads * self.pairwise_head_width:
raise ValueError(
'`pairwise_state_dim` should be equal to `pairwise_num_heads * pairwise_head_width, got'
f' {self.pairwise_state_dim} != {pairwise_num_heads} * {self.pairwise_head_width}.' )
if self.pairwise_state_dim % 2 != 0:
raise ValueError(f'`pairwise_state_dim` should be even, got {self.pairwise_state_dim}.' )
if self.dropout >= 0.4:
raise ValueError(f'`dropout` should not be greater than 0.4, got {self.dropout}.' )
def _A (self ):
__lowercase= asdict(self )
__lowercase= self.structure_module.to_dict()
return output
@dataclass
class A :
UpperCamelCase_ : int =384
UpperCamelCase_ : int =128
UpperCamelCase_ : int =16
UpperCamelCase_ : int =128
UpperCamelCase_ : int =12
UpperCamelCase_ : int =4
UpperCamelCase_ : int =8
UpperCamelCase_ : float =0.1
UpperCamelCase_ : int =8
UpperCamelCase_ : int =1
UpperCamelCase_ : int =2
UpperCamelCase_ : int =7
UpperCamelCase_ : int =10
UpperCamelCase_ : float =1e-8
UpperCamelCase_ : float =1e5
def _A (self ):
return asdict(self )
def _lowerCamelCase( ) -> int:
'''simple docstring'''
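    # The 33-token ESM-2 alphabet: special tokens, the 20 standard amino acids,
    # ambiguous/non-standard residues, gap characters, and the mask token.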
return (
"<cls>",
"<pad>",
"<eos>",
"<unk>",
"L",
"A",
"G",
"V",
"S",
"E",
"R",
"T",
"I",
"D",
"P",
"K",
"Q",
"N",
"F",
"Y",
"M",
"H",
"W",
"C",
"X",
"B",
"U",
"Z",
"O",
".",
"-",
"<null_1>",
"<mask>",
)
| 352
|
import argparse
import re
import requests
import torch
# git clone https://github.com/salesforce/BLIP.git
from models.blip import blip_decoder
from models.blip_itm import blip_itm
from models.blip_vqa import blip_vqa
from PIL import Image
from torchvision import transforms
from torchvision.transforms.functional import InterpolationMode
from transformers import (
BertTokenizer,
BlipConfig,
BlipForConditionalGeneration,
BlipForImageTextRetrieval,
BlipForQuestionAnswering,
)
def _lowerCamelCase( lowercase__ , lowercase__ ) -> Optional[int]:
'''simple docstring'''
__lowercase= 'https://storage.googleapis.com/sfr-vision-language-research/BLIP/demo.jpg'
__lowercase= Image.open(requests.get(lowercase__ , stream=lowercase__ ).raw ).convert('RGB' )
__lowercase= transforms.Compose(
[
transforms.Resize((image_size, image_size) , interpolation=InterpolationMode.BICUBIC ),
transforms.ToTensor(),
transforms.Normalize((0.4814_5466, 0.457_8275, 0.4082_1073) , (0.2686_2954, 0.2613_0258, 0.2757_7711) ),
] )
__lowercase= transform(lowercase__ ).unsqueeze(0 ).to(lowercase__ )
return image
def _lowerCamelCase( lowercase__ ) -> Dict:
'''simple docstring'''
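    # Map BLIP checkpoint keys onto the HuggingFace naming scheme, one substitution
    # per model component.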
if "visual_encoder" in key:
__lowercase= re.sub('visual_encoder*' , 'vision_model.encoder' , lowercase__ )
if "blocks" in key:
__lowercase= re.sub(R'blocks' , 'layers' , lowercase__ )
if "attn" in key:
__lowercase= re.sub(R'attn' , 'self_attn' , lowercase__ )
if "norm1" in key:
__lowercase= re.sub(R'norm1' , 'layer_norm1' , lowercase__ )
if "norm2" in key:
__lowercase= re.sub(R'norm2' , 'layer_norm2' , lowercase__ )
if "encoder.norm" in key:
__lowercase= re.sub(R'encoder.norm' , 'post_layernorm' , lowercase__ )
if "encoder.patch_embed.proj" in key:
__lowercase= re.sub(R'encoder.patch_embed.proj' , 'embeddings.patch_embedding' , lowercase__ )
if "encoder.pos_embed" in key:
__lowercase= re.sub(R'encoder.pos_embed' , 'embeddings.position_embedding' , lowercase__ )
if "encoder.cls_token" in key:
__lowercase= re.sub(R'encoder.cls_token' , 'embeddings.class_embedding' , lowercase__ )
if "self_attn" in key:
__lowercase= re.sub(R'self_attn.proj' , 'self_attn.projection' , lowercase__ )
return key
@torch.no_grad()
def _lowerCamelCase( lowercase__ , lowercase__=None ) -> int:
'''simple docstring'''
if config_path is not None:
__lowercase= BlipConfig.from_pretrained(lowercase__ )
else:
__lowercase= BlipConfig(projection_dim=5_1_2 , text_config={} , vision_config={} )
__lowercase= BlipForConditionalGeneration(lowercase__ ).eval()
__lowercase= 'https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_base_capfilt_large.pth'
__lowercase= blip_decoder(pretrained=lowercase__ , image_size=3_8_4 , vit='base' )
__lowercase= pt_model.eval()
__lowercase= pt_model.state_dict()
for key in modified_state_dict.copy():
__lowercase= modified_state_dict.pop(lowercase__ )
__lowercase= rename_key(lowercase__ )
__lowercase= value
hf_model.load_state_dict(lowercase__ )
__lowercase= 3_8_4
__lowercase= load_demo_image(image_size=lowercase__ , device='cpu' )
__lowercase= BertTokenizer.from_pretrained('bert-base-uncased' )
__lowercase= tokenizer(['a picture of'] ).input_ids
__lowercase= hf_model.generate(lowercase__ , lowercase__ )
assert out[0].tolist() == [3_0_5_2_2, 1_0_3_7, 3_8_6_1, 1_9_9_7, 1_0_3_7, 2_4_5_0, 3_5_6_4, 2_0_0_6, 1_9_9_6, 3_5_0_9, 2_0_0_7, 2_0_1_4, 3_8_9_9, 1_0_2]
__lowercase= hf_model.generate(lowercase__ )
assert out[0].tolist() == [3_0_5_2_2, 1_0_3_7, 2_4_5_0, 3_5_6_4, 2_0_0_6, 1_9_9_6, 3_5_0_9, 2_0_0_7, 2_0_1_4, 3_8_9_9, 1_0_2]
if pytorch_dump_folder_path is not None:
hf_model.save_pretrained(lowercase__ )
# model_url = 'https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_vqa.pth'
__lowercase= (
'https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_base_vqa_capfilt_large.pth'
)
__lowercase= blip_vqa(pretrained=lowercase__ , image_size=lowercase__ , vit='base' )
vqa_model.eval()
__lowercase= vqa_model.state_dict()
for key in modified_state_dict.copy():
__lowercase= modified_state_dict.pop(lowercase__ )
__lowercase= rename_key(lowercase__ )
__lowercase= value
__lowercase= BlipForQuestionAnswering(lowercase__ )
hf_vqa_model.load_state_dict(lowercase__ )
__lowercase= ['How many dogs are in this image?']
__lowercase= tokenizer(lowercase__ , return_tensors='pt' ).input_ids
__lowercase= hf_vqa_model.generate(lowercase__ , lowercase__ )
print(tokenizer.decode(answer[0] ) )
assert tokenizer.decode(answer[0] ) == "[UNK] 1 [SEP]"
if pytorch_dump_folder_path is not None:
hf_vqa_model.save_pretrained(pytorch_dump_folder_path + '_vqa' )
__lowercase= 'https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_base_retrieval_coco.pth'
__lowercase= blip_itm(pretrained=lowercase__ , image_size=lowercase__ , vit='base' )
itm_model.eval()
__lowercase= itm_model.state_dict()
for key in modified_state_dict.copy():
__lowercase= modified_state_dict.pop(lowercase__ )
__lowercase= rename_key(lowercase__ )
__lowercase= value
__lowercase= BlipForImageTextRetrieval(lowercase__ )
__lowercase= ['A picture of a woman with a dog sitting in a beach']
__lowercase= tokenizer(
lowercase__ , return_tensors='pt' , padding='max_length' , truncation=lowercase__ , max_length=3_5 , ).input_ids
hf_itm_model.load_state_dict(lowercase__ )
hf_itm_model.eval()
__lowercase= hf_itm_model(lowercase__ , lowercase__ , use_itm_head=lowercase__ )
__lowercase= hf_itm_model(lowercase__ , lowercase__ , use_itm_head=lowercase__ )
assert out[0].item() == 0.2110_6874_9427_7954
assert torch.nn.functional.softmax(out_itm[0] , dim=1 )[:, 1].item() == 0.4_5698_8453_8650_5127
if pytorch_dump_folder_path is not None:
hf_itm_model.save_pretrained(pytorch_dump_folder_path + '_itm' )
if __name__ == "__main__":
lowerCAmelCase = argparse.ArgumentParser()
parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
parser.add_argument('''--config_path''', default=None, type=str, help='''Path to hf config.json of model to convert''')
lowerCAmelCase = parser.parse_args()
    convert_blip_checkpoint(args.pytorch_dump_folder_path, args.config_path)
| 304
| 0
|
import argparse
import collections
import torch
from flax import traverse_util
from t5x import checkpoints
from transformers import T5Config as TaConfig, T5EncoderModel as TaEncoderModel, T5ForConditionalGeneration as TaForConditionalGeneration
from transformers.utils import logging
logging.set_verbosity_info()
def _lowerCamelCase( lowercase__ , lowercase__ , lowercase__ , lowercase__="attention" ) -> Dict:
'''simple docstring'''
__lowercase= params[F'{prefix}/layers_{i}/{layer_name}/key/kernel']
__lowercase= params[F'{prefix}/layers_{i}/{layer_name}/out/kernel']
__lowercase= params[F'{prefix}/layers_{i}/{layer_name}/query/kernel']
__lowercase= params[F'{prefix}/layers_{i}/{layer_name}/value/kernel']
return k, o, q, v
def _lowerCamelCase( lowercase__ , lowercase__ , lowercase__ , lowercase__=False ) -> Any:
'''simple docstring'''
if split_mlp_wi:
__lowercase= params[F'{prefix}/layers_{i}/mlp/wi_0/kernel']
__lowercase= params[F'{prefix}/layers_{i}/mlp/wi_1/kernel']
__lowercase= (wi_a, wi_a)
else:
__lowercase= params[F'{prefix}/layers_{i}/mlp/wi/kernel']
__lowercase= params[F'{prefix}/layers_{i}/mlp/wo/kernel']
return wi, wo
def _lowerCamelCase( lowercase__ , lowercase__ , lowercase__ , lowercase__ ) -> Union[str, Any]:
'''simple docstring'''
return params[F'{prefix}/layers_{i}/{layer_name}/scale']
def _lowerCamelCase( lowercase__ , *, lowercase__ , lowercase__ ) -> int:
'''simple docstring'''
__lowercase= traverse_util.flatten_dict(variables['target'] )
__lowercase= {'/'.join(lowercase__ ): v for k, v in old.items()}
# v1.1 models have a gated GeLU with wi_0 and wi_1 instead of wi
__lowercase= 'encoder/layers_0/mlp/wi_0/kernel' in old
print('Split MLP:' , lowercase__ )
__lowercase= collections.OrderedDict()
# Shared embeddings.
__lowercase= old['token_embedder/embedding']
# Encoder.
for i in range(lowercase__ ):
# Block i, layer 0 (Self Attention).
__lowercase= tax_layer_norm_lookup(lowercase__ , lowercase__ , 'encoder' , 'pre_attention_layer_norm' )
__lowercase, __lowercase, __lowercase, __lowercase= tax_attention_lookup(lowercase__ , lowercase__ , 'encoder' , 'attention' )
__lowercase= layer_norm
__lowercase= k.T
__lowercase= o.T
__lowercase= q.T
__lowercase= v.T
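        # Flax stores dense kernels as (in_features, out_features); PyTorch nn.Linear
        # expects (out_features, in_features), hence the transposes throughout.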
# Block i, layer 1 (MLP).
__lowercase= tax_layer_norm_lookup(lowercase__ , lowercase__ , 'encoder' , 'pre_mlp_layer_norm' )
__lowercase, __lowercase= tax_mlp_lookup(lowercase__ , lowercase__ , 'encoder' , lowercase__ )
__lowercase= layer_norm
if split_mlp_wi:
__lowercase= wi[0].T
__lowercase= wi[1].T
else:
__lowercase= wi.T
__lowercase= wo.T
__lowercase= old[
'encoder/relpos_bias/rel_embedding'
].T
__lowercase= old['encoder/encoder_norm/scale']
if not is_encoder_only:
# Decoder.
for i in range(lowercase__ ):
# Block i, layer 0 (Self Attention).
__lowercase= tax_layer_norm_lookup(lowercase__ , lowercase__ , 'decoder' , 'pre_self_attention_layer_norm' )
__lowercase, __lowercase, __lowercase, __lowercase= tax_attention_lookup(lowercase__ , lowercase__ , 'decoder' , 'self_attention' )
__lowercase= layer_norm
__lowercase= k.T
__lowercase= o.T
__lowercase= q.T
__lowercase= v.T
# Block i, layer 1 (Cross Attention).
__lowercase= tax_layer_norm_lookup(lowercase__ , lowercase__ , 'decoder' , 'pre_cross_attention_layer_norm' )
__lowercase, __lowercase, __lowercase, __lowercase= tax_attention_lookup(lowercase__ , lowercase__ , 'decoder' , 'encoder_decoder_attention' )
__lowercase= layer_norm
__lowercase= k.T
__lowercase= o.T
__lowercase= q.T
__lowercase= v.T
# Block i, layer 2 (MLP).
__lowercase= tax_layer_norm_lookup(lowercase__ , lowercase__ , 'decoder' , 'pre_mlp_layer_norm' )
__lowercase, __lowercase= tax_mlp_lookup(lowercase__ , lowercase__ , 'decoder' , lowercase__ )
__lowercase= layer_norm
if split_mlp_wi:
__lowercase= wi[0].T
__lowercase= wi[1].T
else:
__lowercase= wi.T
__lowercase= wo.T
__lowercase= old['decoder/decoder_norm/scale']
__lowercase= old[
'decoder/relpos_bias/rel_embedding'
].T
# LM Head (only in v1.1 checkpoints, in v1.0 embeddings are used instead)
if "decoder/logits_dense/kernel" in old:
__lowercase= old['decoder/logits_dense/kernel'].T
return new
def _lowerCamelCase( lowercase__ , lowercase__ ) -> str:
'''simple docstring'''
__lowercase= collections.OrderedDict([(k, torch.from_numpy(v.copy() )) for (k, v) in converted_params.items()] )
# Add what is missing.
if "encoder.embed_tokens.weight" not in state_dict:
__lowercase= state_dict['shared.weight']
if not is_encoder_only:
if "decoder.embed_tokens.weight" not in state_dict:
__lowercase= state_dict['shared.weight']
if "lm_head.weight" not in state_dict: # For old 1.0 models.
print('Using shared word embeddings as lm_head.' )
__lowercase= state_dict['shared.weight']
return state_dict
def _lowerCamelCase( lowercase__ , lowercase__ , lowercase__ , lowercase__ ) -> Optional[int]:
'''simple docstring'''
    __lowercase= checkpoints.load_t5x_checkpoint(lowercase__ )
__lowercase= convert_tax_to_pytorch(lowercase__ , num_layers=config.num_layers , is_encoder_only=lowercase__ )
__lowercase= make_state_dict(lowercase__ , lowercase__ )
model.load_state_dict(lowercase__ , strict=lowercase__ )
def _lowerCamelCase( lowercase__ , lowercase__ , lowercase__ , lowercase__ = False ) -> int:
'''simple docstring'''
__lowercase= TaConfig.from_json_file(lowercase__ )
print(F'Building PyTorch model from configuration: {config}' )
# Non-v1.1 checkpoints could also use T5Model, but this works for all.
# The v1.0 checkpoints will simply have an LM head that is the word embeddings.
if is_encoder_only:
__lowercase= TaEncoderModel(lowercase__ )
else:
__lowercase= TaForConditionalGeneration(lowercase__ )
# Load weights from tf checkpoint
load_tax_weights_in_ta(lowercase__ , lowercase__ , lowercase__ , lowercase__ )
# Save pytorch-model
print(F'Save PyTorch model to {pytorch_dump_path}' )
model.save_pretrained(lowercase__ )
# Verify that we can load the checkpoint.
model.from_pretrained(lowercase__ )
print('Done' )
if __name__ == "__main__":
lowerCAmelCase = argparse.ArgumentParser(description='''Converts a native T5X checkpoint into a PyTorch checkpoint.''')
# Required parameters
parser.add_argument(
'''--t5x_checkpoint_path''', default=None, type=str, required=True, help='''Path to the T5X checkpoint.'''
)
parser.add_argument(
'''--config_file''',
default=None,
type=str,
required=True,
help='''The config json file corresponding to the pre-trained T5 model.\nThis specifies the model architecture.''',
)
parser.add_argument(
'''--pytorch_dump_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
parser.add_argument(
        '''--is_encoder_only''', action='''store_true''', help='''Whether the checkpoint is an encoder-only model.''', default=False
)
lowerCAmelCase = parser.parse_args()
convert_tax_checkpoint_to_pytorch(
        args.t5x_checkpoint_path, args.config_file, args.pytorch_dump_path, args.is_encoder_only
)
| 353
|
from __future__ import annotations
from collections.abc import Iterable, Iterator
from dataclasses import dataclass
lowerCAmelCase = (3, 9, -1_1, 0, 7, 5, 1, -1)
lowerCAmelCase = (4, 6, 2, 0, 8, 1_0, 3, -2)
@dataclass
class A :
UpperCamelCase_ : int
UpperCamelCase_ : Node | None
class A :
def __init__(self , lowerCAmelCase ):
__lowercase= None
for i in sorted(lowerCAmelCase , reverse=lowerCAmelCase ):
__lowercase= Node(lowerCAmelCase , self.head )
def __iter__(self ):
__lowercase= self.head
while node:
yield node.data
__lowercase= node.next_node
def __len__(self ):
return sum(1 for _ in self )
def __str__(self ):
return " -> ".join([str(lowerCAmelCase ) for node in self] )
def _lowerCamelCase( lowercase__ , lowercase__ ) -> SortedLinkedList:
'''simple docstring'''
return SortedLinkedList(list(lowercase__ ) + list(lowercase__ ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
lowerCAmelCase = SortedLinkedList
print(merge_lists(SSL(test_data_odd), SSL(test_data_even)))
| 304
| 0
|
from __future__ import annotations
import math
import random
from typing import Any
class A :
def __init__(self ):
__lowercase= []
__lowercase= 0
__lowercase= 0
def _A (self ):
return self.head == self.tail
def _A (self , lowerCAmelCase ):
self.data.append(lowerCAmelCase )
__lowercase= self.tail + 1
def _A (self ):
__lowercase= self.data[self.head]
__lowercase= self.head + 1
return ret
def _A (self ):
return self.tail - self.head
def _A (self ):
print(self.data )
print('**************' )
print(self.data[self.head : self.tail] )
class A :
def __init__(self , lowerCAmelCase ):
__lowercase= data
__lowercase= None
__lowercase= None
__lowercase= 1
def _A (self ):
return self.data
def _A (self ):
return self.left
def _A (self ):
return self.right
def _A (self ):
return self.height
def _A (self , lowerCAmelCase ):
__lowercase= data
def _A (self , lowerCAmelCase ):
__lowercase= node
def _A (self , lowerCAmelCase ):
__lowercase= node
def _A (self , lowerCAmelCase ):
__lowercase= height
def _lowerCamelCase( lowercase__ ) -> int:
'''simple docstring'''
if node is None:
return 0
return node.get_height()
def _lowerCamelCase( lowercase__ , lowercase__ ) -> int:
'''simple docstring'''
if a > b:
return a
return b
def _lowerCamelCase( lowercase__ ) -> MyNode:
'''simple docstring'''
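    # Promote the left child to the subtree root, then recompute the cached heights.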
print('left rotation node:' , node.get_data() )
__lowercase= node.get_left()
assert ret is not None
node.set_left(ret.get_right() )
ret.set_right(lowercase__ )
__lowercase= my_max(get_height(node.get_right() ) , get_height(node.get_left() ) ) + 1
node.set_height(lowercase__ )
__lowercase= my_max(get_height(ret.get_right() ) , get_height(ret.get_left() ) ) + 1
ret.set_height(lowercase__ )
return ret
def _lowerCamelCase( lowercase__ ) -> MyNode:
'''simple docstring'''
print('right rotation node:' , node.get_data() )
__lowercase= node.get_right()
assert ret is not None
node.set_right(ret.get_left() )
ret.set_left(lowercase__ )
__lowercase= my_max(get_height(node.get_right() ) , get_height(node.get_left() ) ) + 1
node.set_height(lowercase__ )
__lowercase= my_max(get_height(ret.get_right() ) , get_height(ret.get_left() ) ) + 1
ret.set_height(lowercase__ )
return ret
def _lowerCamelCase( lowercase__ ) -> MyNode:
'''simple docstring'''
__lowercase= node.get_left()
assert left_child is not None
node.set_left(left_rotation(lowercase__ ) )
return right_rotation(lowercase__ )
def _lowerCamelCase( lowercase__ ) -> MyNode:
'''simple docstring'''
__lowercase= node.get_right()
assert right_child is not None
node.set_right(right_rotation(lowercase__ ) )
return left_rotation(lowercase__ )
def _lowerCamelCase( lowercase__ , lowercase__ ) -> MyNode | None:
'''simple docstring'''
if node is None:
return MyNode(lowercase__ )
if data < node.get_data():
node.set_left(insert_node(node.get_left() , lowercase__ ) )
if (
get_height(node.get_left() ) - get_height(node.get_right() ) == 2
        ): # an imbalance detected
__lowercase= node.get_left()
assert left_child is not None
if (
data < left_child.get_data()
): # new node is the left child of the left child
__lowercase= right_rotation(lowercase__ )
else:
__lowercase= lr_rotation(lowercase__ )
else:
node.set_right(insert_node(node.get_right() , lowercase__ ) )
if get_height(node.get_right() ) - get_height(node.get_left() ) == 2:
__lowercase= node.get_right()
assert right_child is not None
if data < right_child.get_data():
__lowercase= rl_rotation(lowercase__ )
else:
__lowercase= left_rotation(lowercase__ )
__lowercase= my_max(get_height(node.get_right() ) , get_height(node.get_left() ) ) + 1
node.set_height(lowercase__ )
return node
def _lowerCamelCase( lowercase__ ) -> Any:
'''simple docstring'''
while True:
__lowercase= root.get_right()
if right_child is None:
break
__lowercase= right_child
return root.get_data()
def _lowerCamelCase( lowercase__ ) -> Any:
'''simple docstring'''
while True:
__lowercase= root.get_left()
if left_child is None:
break
__lowercase= left_child
return root.get_data()
def _lowerCamelCase( lowercase__ , lowercase__ ) -> MyNode | None:
'''simple docstring'''
__lowercase= root.get_left()
__lowercase= root.get_right()
if root.get_data() == data:
if left_child is not None and right_child is not None:
__lowercase= get_left_most(lowercase__ )
root.set_data(lowercase__ )
root.set_right(del_node(lowercase__ , lowercase__ ) )
elif left_child is not None:
__lowercase= left_child
elif right_child is not None:
__lowercase= right_child
else:
return None
elif root.get_data() > data:
if left_child is None:
print('No such data' )
return root
else:
root.set_left(del_node(lowercase__ , lowercase__ ) )
else: # root.get_data() < data
if right_child is None:
return root
else:
root.set_right(del_node(lowercase__ , lowercase__ ) )
if get_height(lowercase__ ) - get_height(lowercase__ ) == 2:
assert right_child is not None
if get_height(right_child.get_right() ) > get_height(right_child.get_left() ):
__lowercase= left_rotation(lowercase__ )
else:
__lowercase= rl_rotation(lowercase__ )
elif get_height(lowercase__ ) - get_height(lowercase__ ) == -2:
assert left_child is not None
if get_height(left_child.get_left() ) > get_height(left_child.get_right() ):
__lowercase= right_rotation(lowercase__ )
else:
__lowercase= lr_rotation(lowercase__ )
__lowercase= my_max(get_height(root.get_right() ) , get_height(root.get_left() ) ) + 1
root.set_height(lowercase__ )
return root
class A :
def __init__(self ):
__lowercase= None
def _A (self ):
return get_height(self.root )
def _A (self , lowerCAmelCase ):
print('insert:' + str(lowerCAmelCase ) )
__lowercase= insert_node(self.root , lowerCAmelCase )
def _A (self , lowerCAmelCase ):
print('delete:' + str(lowerCAmelCase ) )
if self.root is None:
print('Tree is empty!' )
return
__lowercase= del_node(self.root , lowerCAmelCase )
    def __str__(self ): # a level-order traversal gives a more intuitive look at the tree
__lowercase= ''
__lowercase= MyQueue()
q.push(self.root )
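        # Level-order traversal; '*' marks an absent child so each printed layer stays aligned.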
__lowercase= self.get_height()
if layer == 0:
return output
__lowercase= 0
while not q.is_empty():
__lowercase= q.pop()
__lowercase= ' ' * int(math.pow(2 , layer - 1 ) )
output += space
if node is None:
output += "*"
q.push(lowerCAmelCase )
q.push(lowerCAmelCase )
else:
output += str(node.get_data() )
q.push(node.get_left() )
q.push(node.get_right() )
output += space
__lowercase= cnt + 1
for i in range(1_0_0 ):
if cnt == math.pow(2 , lowerCAmelCase ) - 1:
__lowercase= layer - 1
if layer == 0:
output += "\n*************************************"
return output
output += "\n"
break
output += "\n*************************************"
return output
def _lowerCamelCase( ) -> None:
'''simple docstring'''
import doctest
doctest.testmod()
if __name__ == "__main__":
_test()
lowerCAmelCase = AVLtree()
lowerCAmelCase = list(range(1_0))
random.shuffle(lst)
for i in lst:
t.insert(i)
print(str(t))
random.shuffle(lst)
for i in lst:
t.del_node(i)
print(str(t))
| 354
|
from __future__ import annotations
from collections.abc import Callable
def _lowerCamelCase( lowercase__ , lowercase__ , lowercase__ , lowercase__ = 1_0_0 , ) -> float:
'''simple docstring'''
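    # Composite trapezoidal rule: area ~= sum of (f(x_i) + f(x_{i+1})) / 2 * dx over
    # `steps` uniform segments; e.g. f(x) = x**2 on [0, 1] with 1_000 steps gives ~0.3333.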
__lowercase= x_start
__lowercase= fnc(lowercase__ )
__lowercase= 0.0
for _ in range(lowercase__ ):
        # Approximate each small segment of the curve as linear and solve
        # for the trapezoidal area
__lowercase= (x_end - x_start) / steps + xa
__lowercase= fnc(lowercase__ )
area += abs(fxa + fxa ) * (xa - xa) / 2
# Increment step
__lowercase= xa
__lowercase= fxa
return area
if __name__ == "__main__":
def _lowerCamelCase( lowercase__ ) -> Dict:
'''simple docstring'''
return x**3 + x**2
print('''f(x) = x^3 + x^2''')
print('''The area between the curve, x = -5, x = 5 and the x axis is:''')
lowerCAmelCase = 1_0
while i <= 1_0_0_0_0_0:
print(F'with {i} steps: {trapezoidal_area(f, -5, 5, i)}')
i *= 1_0
| 304
| 0
|
import cv2 as cva # OpenCV, aliased so the existing cva.* call sites keep working
import numpy as np
class A :
def __init__(self , lowerCAmelCase , lowerCAmelCase ):
if k in (0.04, 0.06):
__lowercase= k
__lowercase= window_size
else:
raise ValueError('invalid k value' )
def __str__(self ):
return str(self.k )
def _A (self , lowerCAmelCase ):
__lowercase= cva.imread(lowerCAmelCase , 0 )
__lowercase, __lowercase= img.shape
__lowercase= []
__lowercase= img.copy()
__lowercase= cva.cvtColor(lowerCAmelCase , cva.COLOR_GRAY2RGB )
__lowercase, __lowercase= np.gradient(lowerCAmelCase )
__lowercase= dx**2
__lowercase= dy**2
__lowercase= dx * dy
__lowercase= 0.04
__lowercase= self.window_size // 2
for y in range(lowerCAmelCase , h - offset ):
for x in range(lowerCAmelCase , w - offset ):
__lowercase= ixx[
y - offset : y + offset + 1, x - offset : x + offset + 1
].sum()
__lowercase= iyy[
y - offset : y + offset + 1, x - offset : x + offset + 1
].sum()
__lowercase= ixy[
y - offset : y + offset + 1, x - offset : x + offset + 1
].sum()
__lowercase= (wxx * wyy) - (wxy**2)
__lowercase= wxx + wyy
__lowercase= det - k * (trace**2)
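                # Harris response R = det(M) - k * trace(M)^2; a large positive R marks a corner.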
# Can change the value
if r > 0.5:
corner_list.append([x, y, r] )
color_img.itemset((y, x, 0) , 0 )
color_img.itemset((y, x, 1) , 0 )
color_img.itemset((y, x, 2) , 2_5_5 )
return color_img, corner_list
if __name__ == "__main__":
lowerCAmelCase = HarrisCorner(0.0_4, 3)
lowerCAmelCase ,lowerCAmelCase = edge_detect.detect('''path_to_image''')
cva.imwrite('''detect.png''', color_img)
| 355
|
import unittest
from transformers import XLMConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
XLMForMultipleChoice,
XLMForQuestionAnswering,
XLMForQuestionAnsweringSimple,
XLMForSequenceClassification,
XLMForTokenClassification,
XLMModel,
XLMWithLMHeadModel,
)
from transformers.models.xlm.modeling_xlm import XLM_PRETRAINED_MODEL_ARCHIVE_LIST
class A :
def __init__(self , lowerCAmelCase , lowerCAmelCase=1_3 , lowerCAmelCase=7 , lowerCAmelCase=True , lowerCAmelCase=True , lowerCAmelCase=True , lowerCAmelCase=True , lowerCAmelCase=True , lowerCAmelCase=False , lowerCAmelCase=False , lowerCAmelCase=False , lowerCAmelCase=2 , lowerCAmelCase=9_9 , lowerCAmelCase=0 , lowerCAmelCase=3_2 , lowerCAmelCase=5 , lowerCAmelCase=4 , lowerCAmelCase=0.1 , lowerCAmelCase=0.1 , lowerCAmelCase=5_1_2 , lowerCAmelCase=2 , lowerCAmelCase=0.02 , lowerCAmelCase=2 , lowerCAmelCase=4 , lowerCAmelCase="last" , lowerCAmelCase=True , lowerCAmelCase=None , lowerCAmelCase=0 , ):
__lowercase= parent
__lowercase= batch_size
__lowercase= seq_length
__lowercase= is_training
__lowercase= use_input_lengths
__lowercase= use_token_type_ids
__lowercase= use_labels
__lowercase= gelu_activation
__lowercase= sinusoidal_embeddings
__lowercase= causal
__lowercase= asm
__lowercase= n_langs
__lowercase= vocab_size
__lowercase= n_special
__lowercase= hidden_size
__lowercase= num_hidden_layers
__lowercase= num_attention_heads
__lowercase= hidden_dropout_prob
__lowercase= attention_probs_dropout_prob
__lowercase= max_position_embeddings
__lowercase= type_sequence_label_size
__lowercase= initializer_range
__lowercase= num_labels
__lowercase= num_choices
__lowercase= summary_type
__lowercase= use_proj
__lowercase= scope
__lowercase= bos_token_id
def _A (self ):
__lowercase= ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
__lowercase= random_attention_mask([self.batch_size, self.seq_length] )
__lowercase= None
if self.use_input_lengths:
__lowercase= (
ids_tensor([self.batch_size] , vocab_size=2 ) + self.seq_length - 2
) # small variation of seq_length
__lowercase= None
if self.use_token_type_ids:
__lowercase= ids_tensor([self.batch_size, self.seq_length] , self.n_langs )
__lowercase= None
__lowercase= None
__lowercase= None
if self.use_labels:
__lowercase= ids_tensor([self.batch_size] , self.type_sequence_label_size )
__lowercase= ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
__lowercase= ids_tensor([self.batch_size] , 2 ).float()
__lowercase= ids_tensor([self.batch_size] , self.num_choices )
__lowercase= self.get_config()
return (
config,
input_ids,
token_type_ids,
input_lengths,
sequence_labels,
token_labels,
is_impossible_labels,
choice_labels,
input_mask,
)
def _A (self ):
return XLMConfig(
vocab_size=self.vocab_size , n_special=self.n_special , emb_dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , gelu_activation=self.gelu_activation , sinusoidal_embeddings=self.sinusoidal_embeddings , asm=self.asm , causal=self.causal , n_langs=self.n_langs , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , summary_type=self.summary_type , use_proj=self.use_proj , num_labels=self.num_labels , bos_token_id=self.bos_token_id , )
def _A (self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , ):
__lowercase= XLMModel(config=lowerCAmelCase )
model.to(lowerCAmelCase )
model.eval()
__lowercase= model(lowerCAmelCase , lengths=lowerCAmelCase , langs=lowerCAmelCase )
__lowercase= model(lowerCAmelCase , langs=lowerCAmelCase )
__lowercase= model(lowerCAmelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def _A (self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , ):
__lowercase= XLMWithLMHeadModel(lowerCAmelCase )
model.to(lowerCAmelCase )
model.eval()
__lowercase= model(lowerCAmelCase , token_type_ids=lowerCAmelCase , labels=lowerCAmelCase )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def _A (self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , ):
__lowercase= XLMForQuestionAnsweringSimple(lowerCAmelCase )
model.to(lowerCAmelCase )
model.eval()
__lowercase= model(lowerCAmelCase )
__lowercase= model(lowerCAmelCase , start_positions=lowerCAmelCase , end_positions=lowerCAmelCase )
__lowercase= outputs
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def _A (self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , ):
__lowercase= XLMForQuestionAnswering(lowerCAmelCase )
model.to(lowerCAmelCase )
model.eval()
__lowercase= model(lowerCAmelCase )
__lowercase= model(
lowerCAmelCase , start_positions=lowerCAmelCase , end_positions=lowerCAmelCase , cls_index=lowerCAmelCase , is_impossible=lowerCAmelCase , p_mask=lowerCAmelCase , )
__lowercase= model(
lowerCAmelCase , start_positions=lowerCAmelCase , end_positions=lowerCAmelCase , cls_index=lowerCAmelCase , is_impossible=lowerCAmelCase , )
((__lowercase), )= result_with_labels.to_tuple()
__lowercase= model(lowerCAmelCase , start_positions=lowerCAmelCase , end_positions=lowerCAmelCase )
((__lowercase), )= result_with_labels.to_tuple()
self.parent.assertEqual(result_with_labels.loss.shape , () )
self.parent.assertEqual(result.start_top_log_probs.shape , (self.batch_size, model.config.start_n_top) )
self.parent.assertEqual(result.start_top_index.shape , (self.batch_size, model.config.start_n_top) )
self.parent.assertEqual(
result.end_top_log_probs.shape , (self.batch_size, model.config.start_n_top * model.config.end_n_top) )
self.parent.assertEqual(
result.end_top_index.shape , (self.batch_size, model.config.start_n_top * model.config.end_n_top) )
self.parent.assertEqual(result.cls_logits.shape , (self.batch_size,) )
def _A (self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , ):
__lowercase= XLMForSequenceClassification(lowerCAmelCase )
model.to(lowerCAmelCase )
model.eval()
__lowercase= model(lowerCAmelCase )
__lowercase= model(lowerCAmelCase , labels=lowerCAmelCase )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def _A (self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , ):
__lowercase= self.num_labels
__lowercase= XLMForTokenClassification(lowerCAmelCase )
model.to(lowerCAmelCase )
model.eval()
__lowercase= model(lowerCAmelCase , attention_mask=lowerCAmelCase , labels=lowerCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def _A (self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , ):
__lowercase= self.num_choices
__lowercase= XLMForMultipleChoice(config=lowerCAmelCase )
model.to(lowerCAmelCase )
model.eval()
__lowercase= input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
__lowercase= token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
__lowercase= input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
__lowercase= model(
lowerCAmelCase , attention_mask=lowerCAmelCase , token_type_ids=lowerCAmelCase , labels=lowerCAmelCase , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def _A (self ):
__lowercase= self.prepare_config_and_inputs()
(
(
__lowercase
), (
__lowercase
), (
__lowercase
), (
__lowercase
), (
__lowercase
), (
__lowercase
), (
__lowercase
), (
__lowercase
), (
__lowercase
),
)= config_and_inputs
__lowercase= {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'lengths': input_lengths}
return config, inputs_dict
@require_torch
class A ( A_ , A_ , A_ , unittest.TestCase ):
UpperCamelCase_ : int =(
(
XLMModel,
XLMWithLMHeadModel,
XLMForQuestionAnswering,
XLMForSequenceClassification,
XLMForQuestionAnsweringSimple,
XLMForTokenClassification,
XLMForMultipleChoice,
)
if is_torch_available()
else ()
)
UpperCamelCase_ : Dict =(
(XLMWithLMHeadModel,) if is_torch_available() else ()
) # TODO (PVP): Check other models whether language generation is also applicable
UpperCamelCase_ : str =(
{
'''feature-extraction''': XLMModel,
'''fill-mask''': XLMWithLMHeadModel,
'''question-answering''': XLMForQuestionAnsweringSimple,
'''text-classification''': XLMForSequenceClassification,
'''text-generation''': XLMWithLMHeadModel,
'''token-classification''': XLMForTokenClassification,
'''zero-shot''': XLMForSequenceClassification,
}
if is_torch_available()
else {}
)
def _A (self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase ):
if (
pipeline_test_casse_name == "QAPipelineTests"
and tokenizer_name is not None
and not tokenizer_name.endswith('Fast' )
):
            # `QAPipelineTests` fails for a few models when the slower tokenizers are used.
# (The slower tokenizers were never used for pipeline tests before the pipeline testing rework)
# TODO: check (and possibly fix) the `QAPipelineTests` with slower tokenizer
return True
return False
def _A (self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase=False ):
__lowercase= super()._prepare_for_class(lowerCAmelCase , lowerCAmelCase , return_labels=lowerCAmelCase )
if return_labels:
if model_class.__name__ == "XLMForQuestionAnswering":
__lowercase= torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=lowerCAmelCase )
__lowercase= torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=lowerCAmelCase )
return inputs_dict
def _A (self ):
__lowercase= XLMModelTester(self )
__lowercase= ConfigTester(self , config_class=lowerCAmelCase , emb_dim=3_7 )
def _A (self ):
self.config_tester.run_common_tests()
def _A (self ):
__lowercase= self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_model(*lowerCAmelCase )
def _A (self ):
__lowercase= self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_lm_head(*lowerCAmelCase )
def _A (self ):
__lowercase= self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_simple_qa(*lowerCAmelCase )
def _A (self ):
__lowercase= self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_qa(*lowerCAmelCase )
def _A (self ):
__lowercase= self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_sequence_classif(*lowerCAmelCase )
def _A (self ):
__lowercase= self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_token_classif(*lowerCAmelCase )
def _A (self ):
__lowercase= self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_for_multiple_choice(*lowerCAmelCase )
def _A (self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase=False , lowerCAmelCase=1 ):
self.assertIsInstance(lowerCAmelCase , lowerCAmelCase )
self.assertListEqual(
[isinstance(lowerCAmelCase , lowerCAmelCase ) for iter_attentions in attentions] , [True] * len(lowerCAmelCase ) )
self.assertEqual(len(lowerCAmelCase ) , (max_length - min_length) * num_beam_groups )
for idx, iter_attentions in enumerate(lowerCAmelCase ):
# adds PAD dummy token
__lowercase= min_length + idx + 1
__lowercase= min_length + idx + 1
__lowercase= (
batch_size * num_beam_groups,
config.num_attention_heads,
tgt_len,
src_len,
)
# check attn size
self.assertListEqual(
[layer_attention.shape for layer_attention in iter_attentions] , [expected_shape] * len(lowerCAmelCase ) )
def _A (self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase=False , lowerCAmelCase=1 ):
self.assertIsInstance(lowerCAmelCase , lowerCAmelCase )
self.assertListEqual(
[isinstance(lowerCAmelCase , lowerCAmelCase ) for iter_hidden_states in hidden_states] , [True] * len(lowerCAmelCase ) , )
self.assertEqual(len(lowerCAmelCase ) , (max_length - min_length) * num_beam_groups )
for idx, iter_hidden_states in enumerate(lowerCAmelCase ):
# adds PAD dummy token
__lowercase= min_length + idx + 1
__lowercase= (batch_size * num_beam_groups, seq_len, config.hidden_size)
# check hidden size
self.assertListEqual(
[layer_hidden_states.shape for layer_hidden_states in iter_hidden_states] , [expected_shape] * len(lowerCAmelCase ) , )
pass
@slow
def _A (self ):
for model_name in XLM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__lowercase= XLMModel.from_pretrained(lowerCAmelCase )
self.assertIsNotNone(lowerCAmelCase )
@require_torch
class A ( unittest.TestCase ):
@slow
def _A (self ):
__lowercase= XLMWithLMHeadModel.from_pretrained('xlm-mlm-en-2048' )
model.to(lowerCAmelCase )
__lowercase= torch.tensor([[1_4, 4_4_7]] , dtype=torch.long , device=lowerCAmelCase ) # the president
__lowercase= [
1_4,
4_4_7,
1_4,
4_4_7,
1_4,
4_4_7,
1_4,
4_4_7,
1_4,
4_4_7,
1_4,
4_4_7,
1_4,
4_4_7,
1_4,
4_4_7,
1_4,
4_4_7,
1_4,
4_4_7,
] # the president the president the president the president the president the president the president the president the president the president
# TODO(PVP): this and other input_ids I tried for generation give pretty bad results. Not sure why. Model might just not be made for auto-regressive inference
__lowercase= model.generate(lowerCAmelCase , do_sample=lowerCAmelCase )
self.assertListEqual(output_ids[0].cpu().numpy().tolist() , lowerCAmelCase )
| 304
| 0
|
from __future__ import annotations
from collections.abc import Callable
def _lowerCamelCase( lowercase__ , lowercase__ , lowercase__ , lowercase__ = 1_0_0 , ) -> float:
'''simple docstring'''
__lowercase= x_start
__lowercase= fnc(lowercase__ )
__lowercase= 0.0
for _ in range(lowercase__ ):
        # Approximate each small segment of the curve as linear and solve
        # for the trapezoidal area
__lowercase= (x_end - x_start) / steps + xa
__lowercase= fnc(lowercase__ )
area += abs(fxa + fxa ) * (xa - xa) / 2
# Increment step
__lowercase= xa
__lowercase= fxa
return area
if __name__ == "__main__":
def _lowerCamelCase( lowercase__ ) -> Dict:
'''simple docstring'''
return x**3 + x**2
print('''f(x) = x^3 + x^2''')
print('''The area between the curve, x = -5, x = 5 and the x axis is:''')
lowerCAmelCase = 1_0
while i <= 1_0_0_0_0_0:
print(F'with {i} steps: {trapezoidal_area(f, -5, 5, i)}')
i *= 1_0
| 356
|
from __future__ import annotations
import json
import requests
from bs4 import BeautifulSoup
from fake_useragent import UserAgent
lowerCAmelCase = {'''UserAgent''': UserAgent().random}
def _lowerCamelCase( lowercase__ ) -> dict:
'''simple docstring'''
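    # The profile page embeds its data as JSON inside a <script> tag; slice from the
    # '{"config"' marker to the trailing semicolon and parse it.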
__lowercase= script.contents[0]
__lowercase= json.loads(data[data.find('{"config"' ) : -1] )
return info["entry_data"]["ProfilePage"][0]["graphql"]["user"]
class A :
def __init__(self , lowerCAmelCase ):
__lowercase= f'https://www.instagram.com/{username}/'
__lowercase= self.get_json()
def _A (self ):
__lowercase= requests.get(self.url , headers=lowerCAmelCase ).text
__lowercase= BeautifulSoup(lowerCAmelCase , 'html.parser' ).find_all('script' )
try:
return extract_user_profile(scripts[4] )
except (json.decoder.JSONDecodeError, KeyError):
return extract_user_profile(scripts[3] )
def __repr__(self ):
return f'{self.__class__.__name__}(\'{self.username}\')'
def __str__(self ):
return f'{self.fullname} ({self.username}) is {self.biography}'
@property
def _A (self ):
return self.user_data["username"]
@property
def _A (self ):
return self.user_data["full_name"]
@property
def _A (self ):
return self.user_data["biography"]
@property
def _A (self ):
return self.user_data["business_email"]
@property
def _A (self ):
return self.user_data["external_url"]
@property
def _A (self ):
return self.user_data["edge_followed_by"]["count"]
@property
def _A (self ):
return self.user_data["edge_follow"]["count"]
@property
def _A (self ):
return self.user_data["edge_owner_to_timeline_media"]["count"]
@property
def _A (self ):
return self.user_data["profile_pic_url_hd"]
@property
def _A (self ):
return self.user_data["is_verified"]
@property
def _A (self ):
return self.user_data["is_private"]
def _lowerCamelCase( lowercase__ = "github" ) -> None:
'''simple docstring'''
import os
if os.environ.get('CI' ):
return # test failing on GitHub Actions
__lowercase= InstagramUser(lowercase__ )
assert instagram_user.user_data
assert isinstance(instagram_user.user_data , lowercase__ )
assert instagram_user.username == username
if username != "github":
return
assert instagram_user.fullname == "GitHub"
assert instagram_user.biography == "Built for developers."
assert instagram_user.number_of_posts > 1_5_0
assert instagram_user.number_of_followers > 1_2_0_0_0_0
assert instagram_user.number_of_followings > 1_5
assert instagram_user.email == "support@github.com"
assert instagram_user.website == "https://github.com/readme"
assert instagram_user.profile_picture_url.startswith('https://instagram.' )
assert instagram_user.is_verified is True
assert instagram_user.is_private is False
if __name__ == "__main__":
import doctest
doctest.testmod()
    instagram_user = InstagramUser('''github''')
print(instagram_user)
print(F'{instagram_user.number_of_posts = }')
print(F'{instagram_user.number_of_followers = }')
print(F'{instagram_user.number_of_followings = }')
print(F'{instagram_user.email = }')
print(F'{instagram_user.website = }')
print(F'{instagram_user.profile_picture_url = }')
print(F'{instagram_user.is_verified = }')
print(F'{instagram_user.is_private = }')
| 304
| 0
|
# coding=utf-8
# Copyright 2020 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# this script dumps information about the environment
import os
import sys
import transformers
os.environ['''TF_CPP_MIN_LOG_LEVEL'''] = '''3'''  # quiet TensorFlow's C++ logging (restored target; the assignment had lost its left-hand side)
print('''Python version:''', sys.version)
print('''transformers version:''', transformers.__version__)
try:
import torch
print('''Torch version:''', torch.__version__)
print('''Cuda available:''', torch.cuda.is_available())
print('''Cuda version:''', torch.version.cuda)
print('''CuDNN version:''', torch.backends.cudnn.version())
print('''Number of GPUs available:''', torch.cuda.device_count())
print('''NCCL version:''', torch.cuda.nccl.version())
except ImportError:
print('''Torch version:''', None)
try:
import deepspeed
print('''DeepSpeed version:''', deepspeed.__version__)
except ImportError:
print('''DeepSpeed version:''', None)
try:
import tensorflow as tf
print('''TensorFlow version:''', tf.__version__)
print('''TF GPUs available:''', bool(tf.config.list_physical_devices('''GPU''')))
print('''Number of TF GPUs available:''', len(tf.config.list_physical_devices('''GPU''')))
except ImportError:
print('''TensorFlow version:''', None)
| 357
|
from typing import Any
import numpy as np
def is_hermitian(matrix: np.ndarray) -> bool:
    '''Checks if a matrix is Hermitian (equal to its own conjugate transpose).'''
    return np.array_equal(matrix, matrix.conjugate().T)
def rayleigh_quotient(a: np.ndarray, v: np.ndarray) -> Any:
    '''Returns the Rayleigh quotient of a Hermitian matrix a and vector v.'''
    v_star = v.conjugate().T
    v_star_dot = v_star.dot(a)
    assert isinstance(v_star_dot, np.ndarray)
    return (v_star_dot.dot(v)) / (v_star.dot(v))
def tests() -> None:
    a = np.array([[2, 2 + 1j, 4], [2 - 1j, 3, 1j], [4, -1j, 1]])
    v = np.array([[1], [2], [3]])
    assert is_hermitian(a), F'{a} is not hermitian.'
    print(rayleigh_quotient(a, v))
    a = np.array([[1, 2, 4], [2, 3, -1], [4, -1, 1]])
    assert is_hermitian(a), F'{a} is not hermitian.'
    assert rayleigh_quotient(a, v) == float(3)
if __name__ == "__main__":
    import doctest
    doctest.testmod()
    tests()
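# Note (editor's addition): for a Hermitian matrix A the Rayleigh quotient
# v* A v / (v* v) is always real and bounded by the extreme eigenvalues,
#     lambda_min <= R(A, v) <= lambda_max,
# which is why the second test above can compare the quotient to a plain float.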
| 304
| 0
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
_import_structure = {
    '''configuration_resnet''': ['''RESNET_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''ResNetConfig''', '''ResNetOnnxConfig''']
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_resnet'''] = [
'''RESNET_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''ResNetForImageClassification''',
'''ResNetModel''',
'''ResNetPreTrainedModel''',
'''ResNetBackbone''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_tf_resnet'''] = [
'''TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFResNetForImageClassification''',
'''TFResNetModel''',
'''TFResNetPreTrainedModel''',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_flax_resnet'''] = [
'''FlaxResNetForImageClassification''',
'''FlaxResNetModel''',
'''FlaxResNetPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_resnet import RESNET_PRETRAINED_CONFIG_ARCHIVE_MAP, ResNetConfig, ResNetOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_resnet import (
RESNET_PRETRAINED_MODEL_ARCHIVE_LIST,
ResNetBackbone,
ResNetForImageClassification,
ResNetModel,
ResNetPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_resnet import (
TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST,
TFResNetForImageClassification,
TFResNetModel,
TFResNetPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_resnet import FlaxResNetForImageClassification, FlaxResNetModel, FlaxResNetPreTrainedModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure)
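# How the lazy pattern behaves (editor's sketch; `transformers.models.resnet` is the
# assumed import path): the heavy framework backends are only imported when an
# attribute is first accessed, so importing the package itself stays cheap.
#
#     import transformers.models.resnet as resnet   # fast, no torch/tf/flax import yet
#     model_cls = resnet.ResNetModel                # now the torch backend is loaded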
| 358
|
from math import ceil
from typing import List, Optional, Union
import numpy as np
from ...audio_utils import mel_filter_bank, spectrogram, window_function
from ...feature_extraction_sequence_utils import BatchFeature, SequenceFeatureExtractor
from ...utils import TensorType, logging
logger = logging.get_logger(__name__)
class TvltFeatureExtractor(SequenceFeatureExtractor):
    model_input_names = ["audio_values", "audio_mask"]
    def __init__(self, spectrogram_length=2048, num_channels=1, patch_size=[16, 16], feature_size=128, sampling_rate=44100, hop_length_to_sampling_rate=86, n_fft=2048, padding_value=0.0, **kwargs, ):
        super().__init__(
            feature_size=feature_size, sampling_rate=sampling_rate, padding_value=padding_value, **kwargs, )
        self.spectrogram_length = spectrogram_length
        self.num_channels = num_channels
        self.patch_size = patch_size
        self.freq_len = feature_size // self.patch_size[1]
        self.n_fft = n_fft
        self.hop_length = sampling_rate // hop_length_to_sampling_rate
        self.sampling_rate = sampling_rate
        self.padding_value = padding_value
        self.mel_filters = mel_filter_bank(
            num_frequency_bins=1 + n_fft // 2, num_mel_filters=feature_size, min_frequency=0.0, max_frequency=22050.0, sampling_rate=sampling_rate, norm='slaney', mel_scale='slaney', ).T
    def _np_extract_fbank_features(self, waveform):
        log_spec = spectrogram(
            waveform, window_function(self.n_fft, 'hann'), frame_length=self.n_fft, hop_length=self.hop_length, power=2.0, mel_filters=self.mel_filters.T, log_mel='dB', db_range=80.0, )
        log_spec = log_spec[:, :-1]
        log_spec = log_spec - 20.0
        log_spec = np.clip(log_spec / 40.0, -2.0, 0.0) + 1.0
        return log_spec
    def __call__(self, raw_speech, return_tensors=None, return_attention_mask=True, sampling_rate=None, resample=False, mask_audio=False, **kwargs, ):
        if sampling_rate is not None:
            if sampling_rate != self.sampling_rate:
                raise ValueError(
                    'This feature extractor is set to support sampling rate'
                    f' of {self.sampling_rate}. Please make sure that the provided `raw_speech` input was sampled'
                    f' with {self.sampling_rate} and not {sampling_rate}.' )
        else:
            logger.warning(
                'It is strongly recommended to pass the `sampling_rate` argument to this function. '
                'Failing to do so can result in silent errors that might be hard to debug.' )
        is_batched_numpy = isinstance(raw_speech, np.ndarray) and len(raw_speech.shape) > 1
        if is_batched_numpy and len(raw_speech.shape) > 2:
            raise ValueError(f'Only mono-channel audio is supported for input to {self}')
        is_batched = is_batched_numpy or (
            isinstance(raw_speech, (list, tuple)) and (isinstance(raw_speech[0], (np.ndarray, tuple, list)))
        )
        if is_batched:
            raw_speech = [np.asarray([speech], dtype=np.float32).T for speech in raw_speech]
        elif not is_batched and not isinstance(raw_speech, np.ndarray):
            raw_speech = np.asarray(raw_speech, dtype=np.float32)
        elif isinstance(raw_speech, np.ndarray) and raw_speech.dtype is np.dtype(np.float64):
            raw_speech = raw_speech.astype(np.float32)
        # always return batch
        if not is_batched:
            raw_speech = [np.asarray([raw_speech]).T]
        # Convert audio signals to log mel spectrograms, truncate by time axis
        audio_features = [
            self._np_extract_fbank_features(waveform.squeeze()).T[: self.spectrogram_length] for waveform in raw_speech
        ]
        if isinstance(audio_features[0], list):
            audio_features = [np.asarray(feature, dtype=np.float32) for feature in audio_features]
        # Create audio attention mask
        max_patch_len = max(
            [ceil(feature.shape[0] / self.patch_size[0]) * self.freq_len for feature in audio_features] )  # The maximum number of audio patches in a batch
        if return_attention_mask:
            audio_mask = [
                (ceil(feature.shape[0] / self.patch_size[0]) * self.freq_len) * [1]
                + (max_patch_len - ceil(feature.shape[0] / self.patch_size[0]) * self.freq_len) * [0]
                for feature in audio_features
            ]
            audio_mask = np.array(audio_mask).astype(np.float32)
        # convert into correct format for padding
        max_time_len = max_patch_len // self.freq_len * self.patch_size[0]  # The maximum audio size in a batch
        padded_audio_features = np.ones([len(audio_features), 1, max_time_len, self.feature_size]).astype(np.float32)
        padded_audio_features = padded_audio_features * self.padding_value
        for i in range(len(audio_features)):
            feature = audio_features[i]
            padded_audio_features[i, :, : feature.shape[0], :] = feature
        # return as BatchFeature
        if return_attention_mask:
            data = {'audio_values': padded_audio_features, 'audio_mask': audio_mask}
        else:
            data = {'audio_values': padded_audio_features}
        encoded_inputs = BatchFeature(data=data, tensor_type=return_tensors)
        return encoded_inputs
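# Usage sketch (editor's addition; assumes the class above is exposed as
# TvltFeatureExtractor, as in the upstream transformers library):
#
#     extractor = TvltFeatureExtractor(sampling_rate=44100)
#     batch = extractor(list_of_waveforms, sampling_rate=44100, return_attention_mask=True, return_tensors='np')
#     batch['audio_values'].shape  # (batch, 1, max_time_len, feature_size)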
| 304
| 0
|
import json
import os
from pathlib import Path
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple, Union
import sentencepiece
from ...tokenization_utils import BatchEncoding, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)
SPIECE_UNDERLINE = '''▁'''
VOCAB_FILES_NAMES = {
'''vocab_file''': '''vocab.json''',
'''spm_file''': '''sentencepiece.bpe.model''',
'''tokenizer_config_file''': '''tokenizer_config.json''',
}
PRETRAINED_VOCAB_FILES_MAP = {
'''vocab_file''': {
'''facebook/m2m100_418M''': '''https://huggingface.co/facebook/m2m100_418M/resolve/main/vocab.json''',
'''facebook/m2m100_1.2B''': '''https://huggingface.co/facebook/m2m100_1.2B/resolve/main/vocab.json''',
},
'''spm_file''': {
'''facebook/m2m100_418M''': '''https://huggingface.co/facebook/m2m100_418M/resolve/main/sentencepiece.bpe.model''',
'''facebook/m2m100_1.2B''': '''https://huggingface.co/facebook/m2m100_1.2B/resolve/main/sentencepiece.bpe.model''',
},
'''tokenizer_config_file''': {
'''facebook/m2m100_418M''': '''https://huggingface.co/facebook/m2m100_418M/resolve/main/tokenizer_config.json''',
'''facebook/m2m100_1.2B''': '''https://huggingface.co/facebook/m2m100_1.2B/resolve/main/tokenizer_config.json''',
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    '''facebook/m2m100_418M''': 1024,
}
# fmt: off
FAIRSEQ_LANGUAGE_CODES = {
'''m2m100''': ['''af''', '''am''', '''ar''', '''ast''', '''az''', '''ba''', '''be''', '''bg''', '''bn''', '''br''', '''bs''', '''ca''', '''ceb''', '''cs''', '''cy''', '''da''', '''de''', '''el''', '''en''', '''es''', '''et''', '''fa''', '''ff''', '''fi''', '''fr''', '''fy''', '''ga''', '''gd''', '''gl''', '''gu''', '''ha''', '''he''', '''hi''', '''hr''', '''ht''', '''hu''', '''hy''', '''id''', '''ig''', '''ilo''', '''is''', '''it''', '''ja''', '''jv''', '''ka''', '''kk''', '''km''', '''kn''', '''ko''', '''lb''', '''lg''', '''ln''', '''lo''', '''lt''', '''lv''', '''mg''', '''mk''', '''ml''', '''mn''', '''mr''', '''ms''', '''my''', '''ne''', '''nl''', '''no''', '''ns''', '''oc''', '''or''', '''pa''', '''pl''', '''ps''', '''pt''', '''ro''', '''ru''', '''sd''', '''si''', '''sk''', '''sl''', '''so''', '''sq''', '''sr''', '''ss''', '''su''', '''sv''', '''sw''', '''ta''', '''th''', '''tl''', '''tn''', '''tr''', '''uk''', '''ur''', '''uz''', '''vi''', '''wo''', '''xh''', '''yi''', '''yo''', '''zh''', '''zu'''],
'''wmt21''': ['''en''', '''ha''', '''is''', '''ja''', '''cs''', '''ru''', '''zh''', '''de''']
}
class M2M100Tokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    model_input_names = ['input_ids', 'attention_mask']
    prefix_tokens: List[int] = []
    suffix_tokens: List[int] = []
    def __init__(self, vocab_file, spm_file, src_lang=None, tgt_lang=None, bos_token="<s>", eos_token="</s>", sep_token="</s>", pad_token="<pad>", unk_token="<unk>", language_codes="m2m100", sp_model_kwargs: Optional[Dict[str, Any]] = None, num_madeup_words=8, **kwargs, ):
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
        self.language_codes = language_codes
        fairseq_language_code = FAIRSEQ_LANGUAGE_CODES[language_codes]
        self.lang_code_to_token = {lang_code: f'__{lang_code}__' for lang_code in fairseq_language_code}
        kwargs["additional_special_tokens"] = kwargs.get('additional_special_tokens', [])
        kwargs["additional_special_tokens"] += [
            self.get_lang_token(lang_code)
            for lang_code in fairseq_language_code
            if self.get_lang_token(lang_code) not in kwargs["additional_special_tokens"]
        ]
        super().__init__(
            src_lang=src_lang, tgt_lang=tgt_lang, bos_token=bos_token, eos_token=eos_token, sep_token=sep_token, unk_token=unk_token, pad_token=pad_token, language_codes=language_codes, sp_model_kwargs=self.sp_model_kwargs, num_madeup_words=num_madeup_words, **kwargs, )
        self.vocab_file = vocab_file
        self.encoder = load_json(vocab_file)
        self.decoder = {v: k for k, v in self.encoder.items()}
        self.spm_file = spm_file
        self.sp_model = load_spm(spm_file, self.sp_model_kwargs)
        self.encoder_size = len(self.encoder)
        self.lang_token_to_id = {
            self.get_lang_token(lang_code): self.encoder_size + i for i, lang_code in enumerate(fairseq_language_code)
        }
        self.lang_code_to_id = {lang_code: self.encoder_size + i for i, lang_code in enumerate(fairseq_language_code)}
        self.id_to_lang_token = {v: k for k, v in self.lang_token_to_id.items()}
        self._src_lang = src_lang if src_lang is not None else 'en'
        self.tgt_lang = tgt_lang
        self.cur_lang_id = self.get_lang_id(self._src_lang)
        self.set_src_lang_special_tokens(self._src_lang)
        self.num_madeup_words = num_madeup_words
    @property
    def vocab_size(self) -> int:
        return len(self.encoder) + len(self.lang_token_to_id)
    @property
    def src_lang(self) -> str:
        return self._src_lang
    @src_lang.setter
    def src_lang(self, new_src_lang: str) -> None:
        self._src_lang = new_src_lang
        self.set_src_lang_special_tokens(self._src_lang)
    def _tokenize(self, text: str) -> List[str]:
        return self.sp_model.encode(text, out_type=str)
    def _convert_token_to_id(self, token):
        if token in self.lang_token_to_id:
            return self.lang_token_to_id[token]
        return self.encoder.get(token, self.encoder[self.unk_token])
    def _convert_id_to_token(self, index: int) -> str:
        if index in self.id_to_lang_token:
            return self.id_to_lang_token[index]
        return self.decoder.get(index, self.unk_token)
    def convert_tokens_to_string(self, tokens):
        current_sub_tokens = []
        out_string = ''
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                out_string += self.sp_model.decode(current_sub_tokens) + token
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token)
        out_string += self.sp_model.decode(current_sub_tokens)
        return out_string.strip()
    def get_special_tokens_mask(self, token_ids_0, token_ids_1=None, already_has_special_tokens=False):
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True)
        prefix_ones = [1] * len(self.prefix_tokens)
        suffix_ones = [1] * len(self.suffix_tokens)
        if token_ids_1 is None:
            return prefix_ones + ([0] * len(token_ids_0)) + suffix_ones
        return prefix_ones + ([0] * len(token_ids_0)) + ([0] * len(token_ids_1)) + suffix_ones
    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None) -> List[int]:
        if token_ids_1 is None:
            return self.prefix_tokens + token_ids_0 + self.suffix_tokens
        # We don't expect to process pairs, but leave the pair logic for API consistency
        return self.prefix_tokens + token_ids_0 + token_ids_1 + self.suffix_tokens
    def get_vocab(self) -> Dict:
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab
    def __getstate__(self) -> Dict:
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state
    def __setstate__(self, d: Dict) -> None:
        self.__dict__ = d
        # for backward compatibility
        if not hasattr(self, 'sp_model_kwargs'):
            self.sp_model_kwargs = {}
        self.sp_model = load_spm(self.spm_file, self.sp_model_kwargs)
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        save_dir = Path(save_directory)
        if not save_dir.is_dir():
            raise OSError(f'{save_directory} should be a directory')
        vocab_save_path = save_dir / (
            (filename_prefix + '-' if filename_prefix else '') + self.vocab_files_names['vocab_file']
        )
        spm_save_path = save_dir / (
            (filename_prefix + '-' if filename_prefix else '') + self.vocab_files_names['spm_file']
        )
        save_json(self.encoder, vocab_save_path)
        if os.path.abspath(self.spm_file) != os.path.abspath(spm_save_path) and os.path.isfile(self.spm_file):
            copyfile(self.spm_file, spm_save_path)
        elif not os.path.isfile(self.spm_file):
            with open(spm_save_path, 'wb') as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)
        return (str(vocab_save_path), str(spm_save_path))
def _A (self , lowerCAmelCase , lowerCAmelCase = "en" , lowerCAmelCase = None , lowerCAmelCase = "ro" , **lowerCAmelCase , ):
__lowercase= src_lang
__lowercase= tgt_lang
self.set_src_lang_special_tokens(self.src_lang )
return super().prepare_seqaseq_batch(lowerCAmelCase , lowerCAmelCase , **lowerCAmelCase )
def _A (self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , **lowerCAmelCase ):
if src_lang is None or tgt_lang is None:
raise ValueError('Translation requires a `src_lang` and a `tgt_lang` for this model' )
__lowercase= src_lang
__lowercase= self(lowerCAmelCase , add_special_tokens=lowerCAmelCase , **lowerCAmelCase )
__lowercase= self.get_lang_id(lowerCAmelCase )
__lowercase= tgt_lang_id
return inputs
def _A (self ):
self.set_src_lang_special_tokens(self.src_lang )
def _A (self ):
self.set_tgt_lang_special_tokens(self.tgt_lang )
def _A (self , lowerCAmelCase ):
__lowercase= self.get_lang_token(lowerCAmelCase )
__lowercase= self.lang_token_to_id[lang_token]
__lowercase= [self.cur_lang_id]
__lowercase= [self.eos_token_id]
def _A (self , lowerCAmelCase ):
__lowercase= self.get_lang_token(lowerCAmelCase )
__lowercase= self.lang_token_to_id[lang_token]
__lowercase= [self.cur_lang_id]
__lowercase= [self.eos_token_id]
def _A (self , lowerCAmelCase ):
return self.lang_code_to_token[lang]
def _A (self , lowerCAmelCase ):
__lowercase= self.get_lang_token(lowerCAmelCase )
return self.lang_token_to_id[lang_token]
def load_spm(path: str, sp_model_kwargs: Dict[str, Any]) -> sentencepiece.SentencePieceProcessor:
    '''Load a SentencePiece model from disk.'''
    spm = sentencepiece.SentencePieceProcessor(**sp_model_kwargs)
    spm.Load(str(path))
    return spm
def load_json(path: str) -> Union[Dict, List]:
    '''Read a JSON file.'''
    with open(path, 'r') as f:
        return json.load(f)
def save_json(data, path: str) -> None:
    '''Write data to a JSON file.'''
    with open(path, 'w') as f:
        json.dump(data, f, indent=2)
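# Usage sketch (editor's addition; assumes the class mirrors the upstream
# M2M100Tokenizer API):
#
#     tokenizer = M2M100Tokenizer.from_pretrained('facebook/m2m100_418M', src_lang='en', tgt_lang='fr')
#     model_inputs = tokenizer('Hello world', return_tensors='pt')
#     # the target language id is passed to generate() as forced_bos_token_id:
#     forced_bos_token_id = tokenizer.get_lang_id('fr')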
| 359
|
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from accelerate import PartialState
from accelerate.utils.operations import broadcast, gather, gather_object, pad_across_processes, reduce
def create_tensor(state):
    '''Build a per-process tensor so the gathered result is range(1, n**2 + 1).'''
    return (torch.arange(state.num_processes) + 1.0 + (state.num_processes * state.process_index)).to(state.device)
def test_gather(state):
    tensor = create_tensor(state)
    gathered_tensor = gather(tensor)
    assert gathered_tensor.tolist() == list(range(1, state.num_processes**2 + 1))
def test_gather_object(state):
    obj = [state.process_index]
    gathered_obj = gather_object(obj)
    assert len(gathered_obj) == state.num_processes, F'{gathered_obj}, {len(gathered_obj)} != {state.num_processes}'
    assert gathered_obj == list(range(state.num_processes)), F'{gathered_obj} != {list(range(state.num_processes))}'
def test_broadcast(state):
    tensor = create_tensor(state)
    broadcasted_tensor = broadcast(tensor)
    assert broadcasted_tensor.shape == torch.Size([state.num_processes])
    assert broadcasted_tensor.tolist() == list(range(1, state.num_processes + 1))
def test_pad_across_processes(state):
    # the main process gets one extra element so padding is actually exercised
    if state.is_main_process:
        tensor = torch.arange(state.num_processes + 1).to(state.device)
    else:
        tensor = torch.arange(state.num_processes).to(state.device)
    padded_tensor = pad_across_processes(tensor)
    assert padded_tensor.shape == torch.Size([state.num_processes + 1])
    if not state.is_main_process:
        assert padded_tensor.tolist() == list(range(0, state.num_processes)) + [0]
def test_reduce_sum(state):
    # For now this check only runs on two processes
    if state.num_processes != 2:
        return
    tensor = create_tensor(state)
    reduced_tensor = reduce(tensor, 'sum')
    truth_tensor = torch.tensor([4.0, 6]).to(state.device)
    assert torch.allclose(reduced_tensor, truth_tensor), F'{reduced_tensor} != {truth_tensor}'
def test_reduce_mean(state):
    if state.num_processes != 2:
        return
    tensor = create_tensor(state)
    reduced_tensor = reduce(tensor, 'mean')
    truth_tensor = torch.tensor([2.0, 3]).to(state.device)
    assert torch.allclose(reduced_tensor, truth_tensor), F'{reduced_tensor} != {truth_tensor}'
def _mp_fn(index):
    # For xla_spawn (TPUs)
    main()
def main():
    state = PartialState()
    state.print(F'State: {state}')
    state.print('testing gather')
    test_gather(state)
    state.print('testing gather_object')
    test_gather_object(state)
    state.print('testing broadcast')
    test_broadcast(state)
    state.print('testing pad_across_processes')
    test_pad_across_processes(state)
    state.print('testing reduce_sum')
    test_reduce_sum(state)
    state.print('testing reduce_mean')
    test_reduce_mean(state)
if __name__ == "__main__":
    main()
| 304
| 0
|
class SubArray:
    def __init__(self, arr):
        # we need a list not a string, so do something to change the type
        self.array = arr.split(',')
    def solve_sub_array(self):
        rear = [int(self.array[0])] * len(self.array)
        sum_value = [int(self.array[0])] * len(self.array)
        for i in range(1, len(self.array)):
            sum_value[i] = max(
                int(self.array[i]) + sum_value[i - 1], int(self.array[i]))
            rear[i] = max(sum_value[i], rear[i - 1])
        return rear[len(self.array) - 1]
if __name__ == "__main__":
    whole_array = input('''please input some numbers:''')
    array = SubArray(whole_array)
    re = array.solve_sub_array()
    print(('''the results is:''', re))
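# Worked example (editor's addition): for the input string "1,-2,3,4,-1" the two
# running tables are sum_value = [1, -1, 3, 7, 6] and rear = [1, 1, 3, 7, 7], so
# solve_sub_array() returns 7, the sum of the best contiguous sub-array [3, 4].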
| 360
|
# DISCLAIMER: This file is strongly influenced by https://github.com/yang-song/score_sde_pytorch
import math
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, randn_tensor
from .scheduling_utils import SchedulerMixin, SchedulerOutput
@dataclass
class SdeVeOutput(BaseOutput):
    prev_sample: torch.FloatTensor
    prev_sample_mean: torch.FloatTensor
class ScoreSdeVeScheduler(SchedulerMixin, ConfigMixin):
    order = 1
    @register_to_config
    def __init__(self, num_train_timesteps: int = 2000, snr: float = 0.15, sigma_min: float = 0.01, sigma_max: float = 1348.0, sampling_eps: float = 1e-5, correct_steps: int = 1, ):
        # standard deviation of the initial noise distribution
        self.init_noise_sigma = sigma_max
        # setable values
        self.timesteps = None
        self.set_sigmas(num_train_timesteps, sigma_min, sigma_max, sampling_eps)
    def scale_model_input(self, sample: torch.FloatTensor, timestep: Optional[int] = None) -> torch.FloatTensor:
        return sample
    def set_timesteps(self, num_inference_steps: int, sampling_eps: float = None, device: Union[str, torch.device] = None):
        sampling_eps = sampling_eps if sampling_eps is not None else self.config.sampling_eps
        self.timesteps = torch.linspace(1, sampling_eps, num_inference_steps, device=device)
    def set_sigmas(self, num_inference_steps: int, sigma_min: float = None, sigma_max: float = None, sampling_eps: float = None):
        sigma_min = sigma_min if sigma_min is not None else self.config.sigma_min
        sigma_max = sigma_max if sigma_max is not None else self.config.sigma_max
        sampling_eps = sampling_eps if sampling_eps is not None else self.config.sampling_eps
        if self.timesteps is None:
            self.set_timesteps(num_inference_steps, sampling_eps)
        self.sigmas = sigma_min * (sigma_max / sigma_min) ** (self.timesteps / sampling_eps)
        self.discrete_sigmas = torch.exp(torch.linspace(math.log(sigma_min), math.log(sigma_max), num_inference_steps))
        self.sigmas = torch.tensor([sigma_min * (sigma_max / sigma_min) ** t for t in self.timesteps])
    def get_adjacent_sigma(self, timesteps, t):
        return torch.where(
            timesteps == 0, torch.zeros_like(t.to(timesteps.device)), self.discrete_sigmas[timesteps - 1].to(timesteps.device), )
    def step_pred(self, model_output: torch.FloatTensor, timestep: int, sample: torch.FloatTensor, generator: Optional[torch.Generator] = None, return_dict: bool = True, ) -> Union[SdeVeOutput, Tuple]:
        if self.timesteps is None:
            raise ValueError(
                '`self.timesteps` is not set, you need to run \'set_timesteps\' after creating the scheduler')
        timestep = timestep * torch.ones(
            sample.shape[0], device=sample.device)  # torch.repeat_interleave(timestep, sample.shape[0])
        timesteps = (timestep * (len(self.timesteps) - 1)).long()
        # mps requires indices to be in the same device, so we use cpu as is the default with cuda
        timesteps = timesteps.to(self.discrete_sigmas.device)
        sigma = self.discrete_sigmas[timesteps].to(sample.device)
        adjacent_sigma = self.get_adjacent_sigma(timesteps, timestep).to(sample.device)
        drift = torch.zeros_like(sample)
        diffusion = (sigma**2 - adjacent_sigma**2) ** 0.5
        # equation 6 in the paper: the model_output modeled by the network is grad_x log pt(x)
        # also equation 47 shows the analog from SDE models to ancestral sampling methods
        diffusion = diffusion.flatten()
        while len(diffusion.shape) < len(sample.shape):
            diffusion = diffusion.unsqueeze(-1)
        drift = drift - diffusion**2 * model_output
        # equation 6: sample noise for the diffusion term of
        noise = randn_tensor(
            sample.shape, layout=sample.layout, generator=generator, device=sample.device, dtype=sample.dtype)
        prev_sample_mean = sample - drift  # subtract because `dt` is a small negative timestep
        # TODO is the variable diffusion the correct scaling term for the noise?
        prev_sample = prev_sample_mean + diffusion * noise  # add impact of diffusion field g
        if not return_dict:
            return (prev_sample, prev_sample_mean)
        return SdeVeOutput(prev_sample=prev_sample, prev_sample_mean=prev_sample_mean)
    def step_correct(self, model_output: torch.FloatTensor, sample: torch.FloatTensor, generator: Optional[torch.Generator] = None, return_dict: bool = True, ) -> Union[SchedulerOutput, Tuple]:
        if self.timesteps is None:
            raise ValueError(
                '`self.timesteps` is not set, you need to run \'set_timesteps\' after creating the scheduler')
        # For small batch sizes, the paper "suggest replacing norm(z) with sqrt(d), where d is the dim. of z"
        # sample noise for correction
        noise = randn_tensor(sample.shape, layout=sample.layout, generator=generator).to(sample.device)
        # compute step size from the model_output, the noise, and the snr
        grad_norm = torch.norm(model_output.reshape(model_output.shape[0], -1), dim=-1).mean()
        noise_norm = torch.norm(noise.reshape(noise.shape[0], -1), dim=-1).mean()
        step_size = (self.config.snr * noise_norm / grad_norm) ** 2 * 2
        step_size = step_size * torch.ones(sample.shape[0]).to(sample.device)
        # self.repeat_scalar(step_size, sample.shape[0])
        # compute corrected sample: model_output term and noise term
        step_size = step_size.flatten()
        while len(step_size.shape) < len(sample.shape):
            step_size = step_size.unsqueeze(-1)
        prev_sample_mean = sample + step_size * model_output
        prev_sample = prev_sample_mean + ((step_size * 2) ** 0.5) * noise
        if not return_dict:
            return (prev_sample,)
        return SchedulerOutput(prev_sample=prev_sample)
    def add_noise(self, original_samples: torch.FloatTensor, noise: torch.FloatTensor, timesteps: torch.FloatTensor, ) -> torch.FloatTensor:
        # Make sure sigmas and timesteps have the same device and dtype as original_samples
        timesteps = timesteps.to(original_samples.device)
        sigmas = self.discrete_sigmas.to(original_samples.device)[timesteps]
        noise = (
            noise * sigmas[:, None, None, None]
            if noise is not None
            else torch.randn_like(original_samples) * sigmas[:, None, None, None]
        )
        noisy_samples = noise + original_samples
        return noisy_samples
    def __len__(self):
        return self.config.num_train_timesteps
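# Usage sketch (editor's addition; assumes the classes above correspond to
# diffusers' ScoreSdeVeScheduler, whose predictor/corrector loop looks roughly
# like this):
#
#     scheduler = ScoreSdeVeScheduler()
#     scheduler.set_timesteps(num_inference_steps)
#     scheduler.set_sigmas(num_inference_steps)
#     for t in scheduler.timesteps:
#         for _ in range(scheduler.config.correct_steps):
#             sample = scheduler.step_correct(model_output, sample).prev_sample
#         output = scheduler.step_pred(model_output, t, sample)
#         sample, sample_mean = output.prev_sample, output.prev_sample_mean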
| 304
| 0
|
import inspect
import os
import unittest
import torch
import accelerate
from accelerate import Accelerator
from accelerate.test_utils import execute_subprocess_async, require_multi_gpu
from accelerate.utils import patch_environment
class MultiGPUTester(unittest.TestCase):
    def setUp(self):
        mod_file = inspect.getfile(accelerate.test_utils)
        self.test_file_path = os.path.sep.join(mod_file.split(os.path.sep)[:-1] + ['scripts', 'test_script.py'])
        self.data_loop_file_path = os.path.sep.join(
            mod_file.split(os.path.sep)[:-1] + ['scripts', 'test_distributed_data_loop.py'])
        self.operation_file_path = os.path.sep.join(mod_file.split(os.path.sep)[:-1] + ['scripts', 'test_ops.py'])
    @require_multi_gpu
    def test_multi_gpu(self):
        print(f'Found {torch.cuda.device_count()} devices.')
        cmd = ['torchrun', f'--nproc_per_node={torch.cuda.device_count()}', self.test_file_path]
        with patch_environment(omp_num_threads=1):
            execute_subprocess_async(cmd, env=os.environ.copy())
    @require_multi_gpu
    def test_multi_gpu_ops(self):
        print(f'Found {torch.cuda.device_count()} devices.')
        cmd = ['torchrun', f'--nproc_per_node={torch.cuda.device_count()}', self.operation_file_path]
        print(f'Command: {cmd}')
        with patch_environment(omp_num_threads=1):
            execute_subprocess_async(cmd, env=os.environ.copy())
    @require_multi_gpu
    def test_pad_across_processes(self):
        cmd = ['torchrun', f'--nproc_per_node={torch.cuda.device_count()}', inspect.getfile(self.__class__)]
        with patch_environment(omp_num_threads=1):
            execute_subprocess_async(cmd, env=os.environ.copy())
    @require_multi_gpu
    def test_distributed_data_loop(self):
        print(f'Found {torch.cuda.device_count()} devices, using 2 devices only')
        cmd = ['torchrun', f'--nproc_per_node={torch.cuda.device_count()}', self.data_loop_file_path]
        with patch_environment(omp_num_threads=1, cuda_visible_devices='0,1'):
            execute_subprocess_async(cmd, env=os.environ.copy())
if __name__ == "__main__":
lowerCAmelCase = Accelerator()
lowerCAmelCase = (accelerator.state.process_index + 2, 1_0)
lowerCAmelCase = torch.randint(0, 1_0, shape).to(accelerator.device)
lowerCAmelCase = ''''''
lowerCAmelCase = accelerator.pad_across_processes(tensor)
if tensora.shape[0] != accelerator.state.num_processes + 1:
error_msg += F"Found shape {tensora.shape} but should have {accelerator.state.num_processes + 1} at dim 0."
if not torch.equal(tensora[: accelerator.state.process_index + 2], tensor):
error_msg += "Tensors have different values."
if not torch.all(tensora[accelerator.state.process_index + 2 :] == 0):
error_msg += "Padding was not done with the right value (0)."
lowerCAmelCase = accelerator.pad_across_processes(tensor, pad_first=True)
if tensora.shape[0] != accelerator.state.num_processes + 1:
error_msg += F"Found shape {tensora.shape} but should have {accelerator.state.num_processes + 1} at dim 0."
lowerCAmelCase = accelerator.state.num_processes - accelerator.state.process_index - 1
if not torch.equal(tensora[index:], tensor):
error_msg += "Tensors have different values."
if not torch.all(tensora[:index] == 0):
error_msg += "Padding was not done with the right value (0)."
# Raise error at the end to make sure we don't stop at the first failure.
if len(error_msg) > 0:
raise ValueError(error_msg)
| 361
|
import gc
import tempfile
import unittest
import numpy as np
import torch
from diffusers import VersatileDiffusionPipeline
from diffusers.utils.testing_utils import load_image, nightly, require_torch_gpu, torch_device
torch.backends.cuda.matmul.allow_tf32 = False
class VersatileDiffusionPipelineFastTests(unittest.TestCase):
    pass
@nightly
@require_torch_gpu
class VersatileDiffusionPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    def test_from_save_pretrained(self):
        pipe = VersatileDiffusionPipeline.from_pretrained('shi-labs/versatile-diffusion', torch_dtype=torch.float16)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        init_image = load_image(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg')
        generator = torch.manual_seed(0)
        image = pipe.dual_guided(
            prompt='first prompt', image=init_image, text_to_image_strength=0.75, generator=generator, guidance_scale=7.5, num_inference_steps=2, output_type='numpy', ).images
        with tempfile.TemporaryDirectory() as tmpdirname:
            pipe.save_pretrained(tmpdirname)
            pipe = VersatileDiffusionPipeline.from_pretrained(tmpdirname, torch_dtype=torch.float16)
            pipe.to(torch_device)
            pipe.set_progress_bar_config(disable=None)
            generator = generator.manual_seed(0)
            new_image = pipe.dual_guided(
                prompt='first prompt', image=init_image, text_to_image_strength=0.75, generator=generator, guidance_scale=7.5, num_inference_steps=2, output_type='numpy', ).images
        assert np.abs(image - new_image).sum() < 1e-5, "Models don't have the same forward pass"
    def test_inference_dual_guided_then_text_to_image(self):
        pipe = VersatileDiffusionPipeline.from_pretrained('shi-labs/versatile-diffusion', torch_dtype=torch.float16)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        prompt = 'cyberpunk 2077'
        init_image = load_image(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg')
        generator = torch.manual_seed(0)
        image = pipe.dual_guided(
            prompt=prompt, image=init_image, text_to_image_strength=0.75, generator=generator, guidance_scale=7.5, num_inference_steps=50, output_type='numpy', ).images
        image_slice = image[0, 253:256, 253:256, -1]
        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.1448, 0.1619, 0.1741, 0.1086, 0.1147, 0.1128, 0.1199, 0.1165, 0.1001])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1
        prompt = 'A painting of a squirrel eating a burger '
        generator = torch.manual_seed(0)
        image = pipe.text_to_image(
            prompt=prompt, generator=generator, guidance_scale=7.5, num_inference_steps=50, output_type='numpy').images
        image_slice = image[0, 253:256, 253:256, -1]
        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.3367, 0.3169, 0.2656, 0.3870, 0.4790, 0.3796, 0.4009, 0.4878, 0.4778])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1
        image = pipe.image_variation(init_image, generator=generator, output_type='numpy').images
        image_slice = image[0, 253:256, 253:256, -1]
        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.3076, 0.3123, 0.3284, 0.3782, 0.3770, 0.3894, 0.4297, 0.4331, 0.4456])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1
| 304
| 0
|
import unittest
import numpy as np
from transformers.testing_utils import is_flaky, require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DonutImageProcessor
class DonutImageProcessingTester(unittest.TestCase):
    def __init__(self, parent, batch_size=7, num_channels=3, image_size=18, min_resolution=30, max_resolution=400, do_resize=True, size=None, do_thumbnail=True, do_align_axis=False, do_pad=True, do_normalize=True, image_mean=[0.5, 0.5, 0.5], image_std=[0.5, 0.5, 0.5], ):
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size if size is not None else {'height': 18, 'width': 20}
        self.do_thumbnail = do_thumbnail
        self.do_align_axis = do_align_axis
        self.do_pad = do_pad
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
    def prepare_image_processor_dict(self):
        return {
            "do_resize": self.do_resize,
            "size": self.size,
            "do_thumbnail": self.do_thumbnail,
            "do_align_long_axis": self.do_align_axis,
            "do_pad": self.do_pad,
            "do_normalize": self.do_normalize,
            "image_mean": self.image_mean,
            "image_std": self.image_std,
        }
@require_torch
@require_vision
class DonutImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = DonutImageProcessor if is_vision_available() else None
    def setUp(self):
        self.image_processor_tester = DonutImageProcessingTester(self)
    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()
    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, 'do_resize'))
        self.assertTrue(hasattr(image_processing, 'size'))
        self.assertTrue(hasattr(image_processing, 'do_thumbnail'))
        self.assertTrue(hasattr(image_processing, 'do_align_long_axis'))
        self.assertTrue(hasattr(image_processing, 'do_pad'))
        self.assertTrue(hasattr(image_processing, 'do_normalize'))
        self.assertTrue(hasattr(image_processing, 'image_mean'))
        self.assertTrue(hasattr(image_processing, 'image_std'))
    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {'height': 18, 'width': 20})
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42)
        self.assertEqual(image_processor.size, {'height': 42, 'width': 42})
        # Previous config had dimensions in (width, height) order
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=(42, 84))
        self.assertEqual(image_processor.size, {'height': 84, 'width': 42})
    def test_batch_feature(self):
        pass
    @is_flaky()
    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)
        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors='pt').pixel_values
        self.assertEqual(
            encoded_images.shape, (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size['height'],
                self.image_processor_tester.size['width'],
            ), )
        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors='pt').pixel_values
        self.assertEqual(
            encoded_images.shape, (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size['height'],
                self.image_processor_tester.size['width'],
            ), )
    @is_flaky()
    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)
        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors='pt').pixel_values
        self.assertEqual(
            encoded_images.shape, (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size['height'],
                self.image_processor_tester.size['width'],
            ), )
        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors='pt').pixel_values
        self.assertEqual(
            encoded_images.shape, (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size['height'],
                self.image_processor_tester.size['width'],
            ), )
    @is_flaky()
    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)
        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors='pt').pixel_values
        self.assertEqual(
            encoded_images.shape, (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size['height'],
                self.image_processor_tester.size['width'],
            ), )
        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors='pt').pixel_values
        self.assertEqual(
            encoded_images.shape, (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size['height'],
                self.image_processor_tester.size['width'],
            ), )
| 362
|
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
    '''configuration_xmod''': [
        '''XMOD_PRETRAINED_CONFIG_ARCHIVE_MAP''',
        '''XmodConfig''',
        '''XmodOnnxConfig''',
    ],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_xmod'''] = [
'''XMOD_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''XmodForCausalLM''',
'''XmodForMaskedLM''',
'''XmodForMultipleChoice''',
'''XmodForQuestionAnswering''',
'''XmodForSequenceClassification''',
'''XmodForTokenClassification''',
'''XmodModel''',
'''XmodPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_xmod import XMOD_PRETRAINED_CONFIG_ARCHIVE_MAP, XmodConfig, XmodOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xmod import (
XMOD_PRETRAINED_MODEL_ARCHIVE_LIST,
XmodForCausalLM,
XmodForMaskedLM,
XmodForMultipleChoice,
XmodForQuestionAnswering,
XmodForSequenceClassification,
XmodForTokenClassification,
XmodModel,
XmodPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 304
| 0
|
"""simple docstring"""
from typing import Optional, Tuple, Union
import tensorflow as tf
from ...activations_tf import ACT2FN
from ...file_utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward
from ...modeling_tf_outputs import (
TFBaseModelOutputWithNoAttention,
TFBaseModelOutputWithPoolingAndNoAttention,
TFSequenceClassifierOutput,
)
from ...modeling_tf_utils import TFPreTrainedModel, TFSequenceClassificationLoss, keras_serializable, unpack_inputs
from ...tf_utils import shape_list
from ...utils import logging
from .configuration_regnet import RegNetConfig
logger = logging.get_logger(__name__)
# General docstring
_CONFIG_FOR_DOC = '''RegNetConfig'''
# Base docstring
_CHECKPOINT_FOR_DOC = '''facebook/regnet-y-040'''
_EXPECTED_OUTPUT_SHAPE = [1, 1088, 7, 7]
# Image classification docstring
_IMAGE_CLASS_CHECKPOINT = '''facebook/regnet-y-040'''
_IMAGE_CLASS_EXPECTED_OUTPUT = '''tabby, tabby cat'''
TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST = [
    '''facebook/regnet-y-040''',
    # See all regnet models at https://huggingface.co/models?filter=regnet
]
class TFRegNetConvLayer(tf.keras.layers.Layer):
    def __init__(self, out_channels: int, kernel_size: int = 3, stride: int = 1, groups: int = 1, activation: Optional[str] = "relu", **kwargs, ):
        super().__init__(**kwargs)
        # The padding and conv has been verified in
        # https://colab.research.google.com/gist/sayakpaul/854bc10eeaf21c9ee2119e0b9f3841a7/scratchpad.ipynb
        self.padding = tf.keras.layers.ZeroPadding2D(padding=kernel_size // 2)
        self.convolution = tf.keras.layers.Conv2D(
            filters=out_channels, kernel_size=kernel_size, strides=stride, padding='VALID', groups=groups, use_bias=False, name='convolution', )
        self.normalization = tf.keras.layers.BatchNormalization(epsilon=1e-5, momentum=0.9, name='normalization')
        self.activation = ACT2FN[activation] if activation is not None else tf.identity
    def call(self, hidden_state):
        hidden_state = self.convolution(self.padding(hidden_state))
        hidden_state = self.normalization(hidden_state)
        hidden_state = self.activation(hidden_state)
        return hidden_state
class TFRegNetEmbeddings(tf.keras.layers.Layer):
    def __init__(self, config: RegNetConfig, **kwargs):
        super().__init__(**kwargs)
        self.num_channels = config.num_channels
        self.embedder = TFRegNetConvLayer(
            out_channels=config.embedding_size, kernel_size=3, stride=2, activation=config.hidden_act, name='embedder', )
    def call(self, pixel_values):
        num_channels = shape_list(pixel_values)[1]
        if tf.executing_eagerly() and num_channels != self.num_channels:
            raise ValueError(
                'Make sure that the channel dimension of the pixel values match with the one set in the configuration.')
        # When running on CPU, `tf.keras.layers.Conv2D` doesn't support `NCHW` format.
        # So change the input format from `NCHW` to `NHWC`.
        # shape = (batch_size, in_height, in_width, in_channels=num_channels)
        pixel_values = tf.transpose(pixel_values, perm=(0, 2, 3, 1))
        hidden_state = self.embedder(pixel_values)
        return hidden_state
class TFRegNetShortCut(tf.keras.layers.Layer):
    def __init__(self, out_channels: int, stride: int = 2, **kwargs):
        super().__init__(**kwargs)
        self.convolution = tf.keras.layers.Conv2D(
            filters=out_channels, kernel_size=1, strides=stride, use_bias=False, name='convolution')
        self.normalization = tf.keras.layers.BatchNormalization(epsilon=1e-5, momentum=0.9, name='normalization')
    def call(self, inputs: tf.Tensor, training: bool = False):
        return self.normalization(self.convolution(inputs), training=training)
class TFRegNetSELayer(tf.keras.layers.Layer):
    def __init__(self, in_channels: int, reduced_channels: int, **kwargs):
        super().__init__(**kwargs)
        self.pooler = tf.keras.layers.GlobalAveragePooling2D(keepdims=True, name='pooler')
        self.attention = [
            tf.keras.layers.Conv2D(filters=reduced_channels, kernel_size=1, activation='relu', name='attention.0'),
            tf.keras.layers.Conv2D(filters=in_channels, kernel_size=1, activation='sigmoid', name='attention.2'),
        ]
    def call(self, hidden_state):
        # [batch_size, h, w, num_channels] -> [batch_size, 1, 1, num_channels]
        pooled = self.pooler(hidden_state)
        for layer_module in self.attention:
            pooled = layer_module(pooled)
        hidden_state = hidden_state * pooled
        return hidden_state
class TFRegNetXLayer(tf.keras.layers.Layer):
    def __init__(self, config: RegNetConfig, in_channels: int, out_channels: int, stride: int = 1, **kwargs):
        super().__init__(**kwargs)
        should_apply_shortcut = in_channels != out_channels or stride != 1
        groups = max(1, out_channels // config.groups_width)
        self.shortcut = (
            TFRegNetShortCut(out_channels, stride=stride, name='shortcut')
            if should_apply_shortcut
            else tf.keras.layers.Activation('linear', name='shortcut')
        )
        # `self.layers` instead of `self.layer` because that is a reserved argument.
        self.layers = [
            TFRegNetConvLayer(out_channels, kernel_size=1, activation=config.hidden_act, name='layer.0'),
            TFRegNetConvLayer(
                out_channels, stride=stride, groups=groups, activation=config.hidden_act, name='layer.1'),
            TFRegNetConvLayer(out_channels, kernel_size=1, activation=None, name='layer.2'),
        ]
        self.activation = ACT2FN[config.hidden_act]
    def call(self, hidden_state):
        residual = hidden_state
        for layer_module in self.layers:
            hidden_state = layer_module(hidden_state)
        residual = self.shortcut(residual)
        hidden_state += residual
        hidden_state = self.activation(hidden_state)
        return hidden_state
class TFRegNetYLayer(tf.keras.layers.Layer):
    def __init__(self, config: RegNetConfig, in_channels: int, out_channels: int, stride: int = 1, **kwargs):
        super().__init__(**kwargs)
        should_apply_shortcut = in_channels != out_channels or stride != 1
        groups = max(1, out_channels // config.groups_width)
        self.shortcut = (
            TFRegNetShortCut(out_channels, stride=stride, name='shortcut')
            if should_apply_shortcut
            else tf.keras.layers.Activation('linear', name='shortcut')
        )
        self.layers = [
            TFRegNetConvLayer(out_channels, kernel_size=1, activation=config.hidden_act, name='layer.0'),
            TFRegNetConvLayer(
                out_channels, stride=stride, groups=groups, activation=config.hidden_act, name='layer.1'),
            TFRegNetSELayer(out_channels, reduced_channels=int(round(in_channels / 4)), name='layer.2'),
            TFRegNetConvLayer(out_channels, kernel_size=1, activation=None, name='layer.3'),
        ]
        self.activation = ACT2FN[config.hidden_act]
    def call(self, hidden_state):
        residual = hidden_state
        for layer_module in self.layers:
            hidden_state = layer_module(hidden_state)
        residual = self.shortcut(residual)
        hidden_state += residual
        hidden_state = self.activation(hidden_state)
        return hidden_state
class TFRegNetStage(tf.keras.layers.Layer):
    def __init__(self, config: RegNetConfig, in_channels: int, out_channels: int, stride: int = 2, depth: int = 2, **kwargs):
        super().__init__(**kwargs)
        layer = TFRegNetXLayer if config.layer_type == 'x' else TFRegNetYLayer
        self.layers = [
            # downsampling is done in the first layer with stride of 2
            layer(config, in_channels, out_channels, stride=stride, name='layers.0'),
            *[layer(config, out_channels, out_channels, name=f'layers.{i+1}') for i in range(depth - 1)],
        ]
    def call(self, hidden_state):
        for layer_module in self.layers:
            hidden_state = layer_module(hidden_state)
        return hidden_state
class TFRegNetEncoder(tf.keras.layers.Layer):
    def __init__(self, config: RegNetConfig, **kwargs):
        super().__init__(**kwargs)
        self.stages = []
        # based on `downsample_in_first_stage`, the first layer of the first stage may or may not downsample the input
        self.stages.append(
            TFRegNetStage(
                config, config.embedding_size, config.hidden_sizes[0], stride=2 if config.downsample_in_first_stage else 1, depth=config.depths[0], name='stages.0', ))
        in_out_channels = zip(config.hidden_sizes, config.hidden_sizes[1:])
        for i, ((in_channels, out_channels), depth) in enumerate(zip(in_out_channels, config.depths[1:])):
            self.stages.append(TFRegNetStage(config, in_channels, out_channels, depth=depth, name=f'stages.{i+1}'))
    def call(self, hidden_state, output_hidden_states=False, return_dict=True):
        hidden_states = () if output_hidden_states else None
        for stage_module in self.stages:
            if output_hidden_states:
                hidden_states = hidden_states + (hidden_state,)
            hidden_state = stage_module(hidden_state)
        if output_hidden_states:
            hidden_states = hidden_states + (hidden_state,)
        if not return_dict:
            return tuple(v for v in [hidden_state, hidden_states] if v is not None)
        return TFBaseModelOutputWithNoAttention(last_hidden_state=hidden_state, hidden_states=hidden_states)
@keras_serializable
class TFRegNetMainLayer(tf.keras.layers.Layer):
    config_class = RegNetConfig
    def __init__(self, config, **kwargs):
        super().__init__(**kwargs)
        self.config = config
        self.embedder = TFRegNetEmbeddings(config, name='embedder')
        self.encoder = TFRegNetEncoder(config, name='encoder')
        self.pooler = tf.keras.layers.GlobalAveragePooling2D(keepdims=True, name='pooler')
    @unpack_inputs
    def call(self, pixel_values, output_hidden_states=None, return_dict=None, training=False, ):
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        embedding_output = self.embedder(pixel_values, training=training)
        encoder_outputs = self.encoder(
            embedding_output, output_hidden_states=output_hidden_states, return_dict=return_dict, training=training)
        last_hidden_state = encoder_outputs[0]
        pooled_output = self.pooler(last_hidden_state)
        # Change to NCHW output format to have uniformity in the modules
        pooled_output = tf.transpose(pooled_output, perm=(0, 3, 1, 2))
        last_hidden_state = tf.transpose(last_hidden_state, perm=(0, 3, 1, 2))
        # Change the other hidden state outputs to NCHW as well
        if output_hidden_states:
            hidden_states = tuple([tf.transpose(h, perm=(0, 3, 1, 2)) for h in encoder_outputs[1]])
        if not return_dict:
            return (last_hidden_state, pooled_output) + encoder_outputs[1:]
        return TFBaseModelOutputWithPoolingAndNoAttention(
            last_hidden_state=last_hidden_state, pooler_output=pooled_output, hidden_states=hidden_states if output_hidden_states else encoder_outputs.hidden_states, )
class TFRegNetPreTrainedModel(TFPreTrainedModel):
    config_class = RegNetConfig
    base_model_prefix = '''regnet'''
    main_input_name = '''pixel_values'''
    @property
    def input_signature(self):
        return {"pixel_values": tf.TensorSpec(shape=(None, self.config.num_channels, 224, 224), dtype=tf.float32)}
REGNET_START_DOCSTRING = R'''
Parameters:
This model is a Tensorflow
[tf.keras.layers.Layer](https://www.tensorflow.org/api_docs/python/tf/keras/layers/Layer) sub-class. Use it as a
regular Tensorflow Module and refer to the Tensorflow documentation for all matter related to general usage and
behavior.
config ([`RegNetConfig`]): Model configuration class with all the parameters of the model.
Initializing with a config file does not load the weights associated with the model, only the
configuration. Check out the [`~TFPreTrainedModel.from_pretrained`] method to load the model weights.
'''
REGNET_INPUTS_DOCSTRING = R'''
Args:
pixel_values (`tf.Tensor` of shape `(batch_size, num_channels, height, width)`):
Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See
[`ConveNextImageProcessor.__call__`] for details.
output_hidden_states (`bool`, *optional*):
Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
more detail.
return_dict (`bool`, *optional*):
Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
'''
@add_start_docstrings(
    '''The bare RegNet model outputting raw features without any specific head on top.''', REGNET_START_DOCSTRING, )
class TFRegNetModel(TFRegNetPreTrainedModel):
    def __init__(self, config: RegNetConfig, *inputs, **kwargs):
        super().__init__(config, *inputs, **kwargs)
        self.regnet = TFRegNetMainLayer(config, name='regnet')
    @unpack_inputs
    @add_start_docstrings_to_model_forward(REGNET_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
        checkpoint=_CHECKPOINT_FOR_DOC, output_type=TFBaseModelOutputWithPoolingAndNoAttention, config_class=_CONFIG_FOR_DOC, modality='vision', expected_output=_EXPECTED_OUTPUT_SHAPE, )
    def call(self, pixel_values, output_hidden_states=None, return_dict=None, training=False, ):
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        outputs = self.regnet(
            pixel_values=pixel_values, output_hidden_states=output_hidden_states, return_dict=return_dict, training=training, )
        if not return_dict:
            return (outputs[0],) + outputs[1:]
        return TFBaseModelOutputWithPoolingAndNoAttention(
            last_hidden_state=outputs.last_hidden_state, pooler_output=outputs.pooler_output, hidden_states=outputs.hidden_states, )
@add_start_docstrings(
'''
RegNet Model with an image classification head on top (a linear layer on top of the pooled features), e.g. for
ImageNet.
''' , A_ , )
class A ( A_ , A_ ):
def __init__(self , lowerCAmelCase , *lowerCAmelCase , **lowerCAmelCase ):
super().__init__(lowerCAmelCase , *lowerCAmelCase , **lowerCAmelCase )
__lowercase= config.num_labels
__lowercase= TFRegNetMainLayer(lowerCAmelCase , name='regnet' )
# classification head
__lowercase= [
tf.keras.layers.Flatten(),
tf.keras.layers.Dense(config.num_labels , name='classifier.1' ) if config.num_labels > 0 else tf.identity,
]
@unpack_inputs
@add_start_docstrings_to_model_forward(lowerCAmelCase )
@add_code_sample_docstrings(
checkpoint=_IMAGE_CLASS_CHECKPOINT , output_type=lowerCAmelCase , config_class=_CONFIG_FOR_DOC , expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT , )
def _A (self , lowerCAmelCase = None , lowerCAmelCase = None , lowerCAmelCase = None , lowerCAmelCase = None , lowerCAmelCase=False , ):
__lowercase= (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
__lowercase= return_dict if return_dict is not None else self.config.use_return_dict
__lowercase= self.regnet(
lowerCAmelCase , output_hidden_states=lowerCAmelCase , return_dict=lowerCAmelCase , training=lowerCAmelCase )
__lowercase= outputs.pooler_output if return_dict else outputs[1]
__lowercase= self.classifier[0](lowerCAmelCase )
__lowercase= self.classifier[1](lowerCAmelCase )
__lowercase= None if labels is None else self.hf_compute_loss(labels=lowerCAmelCase , logits=lowerCAmelCase )
if not return_dict:
__lowercase= (logits,) + outputs[2:]
return ((loss,) + output) if loss is not None else output
return TFSequenceClassifierOutput(loss=lowerCAmelCase , logits=lowerCAmelCase , hidden_states=outputs.hidden_states )
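# A minimal inference sketch for the classification model above. This follows
# the standard transformers usage pattern, but it assumes TensorFlow,
# transformers, Pillow, requests, and network access to the
# "facebook/regnet-y-040" checkpoint are available.
#
#     from PIL import Image
#     import requests
#     import tensorflow as tf
#     from transformers import AutoImageProcessor, TFRegNetForImageClassification
#
#     url = "http://images.cocodataset.org/val2017/000000039769.jpg"
#     image = Image.open(requests.get(url, stream=True).raw)
#
#     processor = AutoImageProcessor.from_pretrained("facebook/regnet-y-040")
#     model = TFRegNetForImageClassification.from_pretrained("facebook/regnet-y-040")
#
#     inputs = processor(images=image, return_tensors="tf")
#     logits = model(**inputs).logits
#     predicted_label = int(tf.math.argmax(logits, axis=-1)[0])
#     print(model.config.id2label[predicted_label])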
import math
from datetime import datetime, timedelta


def gauss_easter(year: int) -> datetime:
    """
    Calculate the date of Easter for a given year using Gauss' Easter algorithm.
    """
    metonic_cycle = year % 19
    julian_leap_year = year % 4
    non_leap_year = year % 7
    leap_day_inhibits = math.floor(year / 100)
    lunar_orbit_correction = math.floor((13 + 8 * leap_day_inhibits) / 25)
    leap_day_reinstall_number = leap_day_inhibits / 4
    secular_moon_shift = (
        15 - lunar_orbit_correction + leap_day_inhibits - leap_day_reinstall_number
    ) % 30
    century_starting_point = (4 + leap_day_inhibits - leap_day_reinstall_number) % 7

    # days to be added to March 21
    days_to_add = (19 * metonic_cycle + secular_moon_shift) % 30

    # PHM -> Paschal Full Moon
    days_from_phm_to_sunday = (
        2 * julian_leap_year
        + 4 * non_leap_year
        + 6 * days_to_add
        + century_starting_point
    ) % 7

    if days_to_add == 29 and days_from_phm_to_sunday == 6:
        return datetime(year, 4, 19)
    elif days_to_add == 28 and days_from_phm_to_sunday == 6:
        return datetime(year, 4, 18)
    else:
        return datetime(year, 3, 22) + timedelta(
            days=int(days_to_add + days_from_phm_to_sunday)
        )


if __name__ == "__main__":
    for year in (1994, 2000, 2010, 2021, 2023):
        tense = "will be" if year > datetime.now().year else "was"
        print(f"Easter in {year} {tense} {gauss_easter(year)}")
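# Expected output of the demo loop above, cross-checked against the published
# Western Easter dates (the was/"will be" tense depends on when it is run):
#
#     Easter in 1994 was 1994-04-03 00:00:00
#     Easter in 2000 was 2000-04-23 00:00:00
#     Easter in 2010 was 2010-04-04 00:00:00
#     Easter in 2021 was 2021-04-04 00:00:00
#     Easter in 2023 was 2023-04-09 00:00:00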
import string


def decrypt(message: str) -> None:
    """
    Brute-force a Caesar cipher by printing the message decrypted with every
    possible key.
    """
    for key in range(len(string.ascii_uppercase)):
        translated = ""
        for symbol in message:
            if symbol in string.ascii_uppercase:
                num = string.ascii_uppercase.find(symbol)
                num = num - key
                if num < 0:
                    num = num + len(string.ascii_uppercase)
                translated = translated + string.ascii_uppercase[num]
            else:
                translated = translated + symbol
        print(f"Decryption using Key #{key}: {translated}")


def main() -> None:
    message = input("Encrypted message: ")
    message = message.upper()
    decrypt(message)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    main()
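# A non-interactive sanity check: "KHOOR" is "HELLO" Caesar-shifted by 3, so
# the plaintext appears on the key-3 line of the brute-force output.
#
#     decrypt("KHOOR")
#     # ...
#     # Decryption using Key #3: HELLO
#     # ...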
from collections import OrderedDict
from typing import Any, Mapping, Optional

from ... import PreTrainedTokenizer
from ...configuration_utils import PretrainedConfig
from ...file_utils import TensorType, is_torch_available
from ...onnx import OnnxConfig, OnnxConfigWithPast, OnnxSeq2SeqConfigWithPast
from ...onnx.utils import compute_effective_axis_dimension
from ...utils import logging


logger = logging.get_logger(__name__)

BLENDERBOT_SMALL_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/blenderbot_small-90M": "https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/config.json",
    # See all BlenderbotSmall models at https://huggingface.co/models?filter=blenderbot_small
}


class BlenderbotSmallConfig(PretrainedConfig):
    model_type = "blenderbot-small"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}

    def __init__(
        self, vocab_size=50265, max_position_embeddings=512, encoder_layers=8, encoder_ffn_dim=2048,
        encoder_attention_heads=16, decoder_layers=8, decoder_ffn_dim=2048, decoder_attention_heads=16,
        encoder_layerdrop=0.0, decoder_layerdrop=0.0, use_cache=True, is_encoder_decoder=True,
        activation_function="gelu", d_model=512, dropout=0.1, attention_dropout=0.0, activation_dropout=0.0,
        init_std=0.02, decoder_start_token_id=1, scale_embedding=False, pad_token_id=0, bos_token_id=1,
        eos_token_id=2, forced_eos_token_id=2, **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.num_hidden_layers = encoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            is_encoder_decoder=is_encoder_decoder,
            decoder_start_token_id=decoder_start_token_id,
            forced_eos_token_id=forced_eos_token_id,
            **kwargs,
        )


class BlenderbotSmallOnnxConfig(OnnxSeq2SeqConfigWithPast):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task in ["default", "seq2seq-lm"]:
            common_inputs = OrderedDict(
                [
                    ("input_ids", {0: "batch", 1: "encoder_sequence"}),
                    ("attention_mask", {0: "batch", 1: "encoder_sequence"}),
                ]
            )
            if self.use_past:
                common_inputs["decoder_input_ids"] = {0: "batch"}
                common_inputs["decoder_attention_mask"] = {0: "batch", 1: "past_decoder_sequence + sequence"}
            else:
                common_inputs["decoder_input_ids"] = {0: "batch", 1: "decoder_sequence"}
                common_inputs["decoder_attention_mask"] = {0: "batch", 1: "decoder_sequence"}
            if self.use_past:
                self.fill_with_past_key_values_(common_inputs, direction="inputs")
        elif self.task == "causal-lm":
            # TODO: figure this case out.
            common_inputs = OrderedDict(
                [
                    ("input_ids", {0: "batch", 1: "encoder_sequence"}),
                    ("attention_mask", {0: "batch", 1: "encoder_sequence"}),
                ]
            )
            if self.use_past:
                num_encoder_layers, _ = self.num_layers
                for i in range(num_encoder_layers):
                    common_inputs[f"past_key_values.{i}.key"] = {0: "batch", 2: "past_sequence + sequence"}
                    common_inputs[f"past_key_values.{i}.value"] = {0: "batch", 2: "past_sequence + sequence"}
        else:
            common_inputs = OrderedDict(
                [
                    ("input_ids", {0: "batch", 1: "encoder_sequence"}),
                    ("attention_mask", {0: "batch", 1: "encoder_sequence"}),
                    ("decoder_input_ids", {0: "batch", 1: "decoder_sequence"}),
                    ("decoder_attention_mask", {0: "batch", 1: "decoder_sequence"}),
                ]
            )
        return common_inputs

    @property
    def outputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task in ["default", "seq2seq-lm"]:
            common_outputs = super().outputs
        else:
            common_outputs = super(OnnxConfigWithPast, self).outputs
            if self.use_past:
                num_encoder_layers, _ = self.num_layers
                for i in range(num_encoder_layers):
                    common_outputs[f"present.{i}.key"] = {0: "batch", 2: "past_sequence + sequence"}
                    common_outputs[f"present.{i}.value"] = {0: "batch", 2: "past_sequence + sequence"}
        return common_outputs

    def _generate_dummy_inputs_for_default_and_seq2seq_lm(
        self, tokenizer: PreTrainedTokenizer, batch_size: int = -1, seq_length: int = -1,
        is_pair: bool = False, framework: Optional[TensorType] = None,
    ) -> Mapping[str, Any]:
        encoder_inputs = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
            tokenizer, batch_size, seq_length, is_pair, framework
        )
        # Generate decoder inputs
        decoder_seq_length = seq_length if not self.use_past else 1
        decoder_inputs = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
            tokenizer, batch_size, decoder_seq_length, is_pair, framework
        )
        decoder_inputs = {f"decoder_{name}": tensor for name, tensor in decoder_inputs.items()}
        common_inputs = dict(**encoder_inputs, **decoder_inputs)

        if self.use_past:
            if not is_torch_available():
                raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed.")
            else:
                import torch
            batch, encoder_seq_length = common_inputs["input_ids"].shape
            decoder_seq_length = common_inputs["decoder_input_ids"].shape[1]
            num_encoder_attention_heads, num_decoder_attention_heads = self.num_attention_heads
            encoder_shape = (
                batch,
                num_encoder_attention_heads,
                encoder_seq_length,
                self._config.hidden_size // num_encoder_attention_heads,
            )
            decoder_past_length = decoder_seq_length + 3
            decoder_shape = (
                batch,
                num_decoder_attention_heads,
                decoder_past_length,
                self._config.hidden_size // num_decoder_attention_heads,
            )

            common_inputs["decoder_attention_mask"] = torch.cat(
                [common_inputs["decoder_attention_mask"], torch.ones(batch, decoder_past_length)], dim=1
            )

            common_inputs["past_key_values"] = []
            # If the number of encoder and decoder layers are present in the model configuration, both are considered
            num_encoder_layers, num_decoder_layers = self.num_layers
            min_num_layers = min(num_encoder_layers, num_decoder_layers)
            max_num_layers = max(num_encoder_layers, num_decoder_layers) - min_num_layers
            remaining_side_name = "encoder" if num_encoder_layers > num_decoder_layers else "decoder"

            for _ in range(min_num_layers):
                common_inputs["past_key_values"].append(
                    (
                        torch.zeros(decoder_shape),
                        torch.zeros(decoder_shape),
                        torch.zeros(encoder_shape),
                        torch.zeros(encoder_shape),
                    )
                )
            # TODO: test this.
            shape = encoder_shape if remaining_side_name == "encoder" else decoder_shape
            for _ in range(min_num_layers, max_num_layers):
                common_inputs["past_key_values"].append((torch.zeros(shape), torch.zeros(shape)))
        return common_inputs

    def _generate_dummy_inputs_for_causal_lm(
        self, tokenizer: PreTrainedTokenizer, batch_size: int = -1, seq_length: int = -1,
        is_pair: bool = False, framework: Optional[TensorType] = None,
    ) -> Mapping[str, Any]:
        common_inputs = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
            tokenizer, batch_size, seq_length, is_pair, framework
        )

        if self.use_past:
            if not is_torch_available():
                raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed.")
            else:
                import torch
            batch, seqlen = common_inputs["input_ids"].shape
            # Not using the same length for past_key_values
            past_key_values_length = seqlen + 2
            num_encoder_layers, _ = self.num_layers
            num_encoder_attention_heads, _ = self.num_attention_heads
            past_shape = (
                batch,
                num_encoder_attention_heads,
                past_key_values_length,
                self._config.hidden_size // num_encoder_attention_heads,
            )

            mask_dtype = common_inputs["attention_mask"].dtype
            common_inputs["attention_mask"] = torch.cat(
                [common_inputs["attention_mask"], torch.ones(batch, past_key_values_length, dtype=mask_dtype)], dim=1
            )
            common_inputs["past_key_values"] = [
                (torch.zeros(past_shape), torch.zeros(past_shape)) for _ in range(num_encoder_layers)
            ]
        return common_inputs

    def _generate_dummy_inputs_for_sequence_classification_and_question_answering(
        self, tokenizer: PreTrainedTokenizer, batch_size: int = -1, seq_length: int = -1,
        is_pair: bool = False, framework: Optional[TensorType] = None,
    ) -> Mapping[str, Any]:
        # Copied from OnnxConfig.generate_dummy_inputs
        # Did not use super(OnnxConfigWithPast, self).generate_dummy_inputs for code clarity.
        # If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
        batch_size = compute_effective_axis_dimension(
            batch_size, fixed_dimension=OnnxConfig.default_fixed_batch, num_token_to_add=0
        )
        # If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
        token_to_add = tokenizer.num_special_tokens_to_add(is_pair)
        seq_length = compute_effective_axis_dimension(
            seq_length, fixed_dimension=OnnxConfig.default_fixed_sequence, num_token_to_add=token_to_add
        )
        # Generate dummy inputs according to compute batch and sequence
        dummy_input = [" ".join([tokenizer.unk_token]) * seq_length] * batch_size
        common_inputs = dict(tokenizer(dummy_input, return_tensors=framework))
        return common_inputs

    def generate_dummy_inputs(
        self, tokenizer: PreTrainedTokenizer, batch_size: int = -1, seq_length: int = -1,
        is_pair: bool = False, framework: Optional[TensorType] = None,
    ) -> Mapping[str, Any]:
        if self.task in ["default", "seq2seq-lm"]:
            common_inputs = self._generate_dummy_inputs_for_default_and_seq2seq_lm(
                tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
            )
        elif self.task == "causal-lm":
            common_inputs = self._generate_dummy_inputs_for_causal_lm(
                tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
            )
        else:
            common_inputs = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
                tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
            )
        return common_inputs

    def _flatten_past_key_values_(self, flattened_output, name, idx, t):
        if self.task in ["default", "seq2seq-lm"]:
            flattened_output = super()._flatten_past_key_values_(flattened_output, name, idx, t)
        else:
            flattened_output = super(OnnxSeq2SeqConfigWithPast, self)._flatten_past_key_values_(
                flattened_output, name, idx, t
            )
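# A rough sketch of exercising the ONNX config above. Hedged: this assumes a
# transformers release that still ships BlenderbotSmallOnnxConfig under this
# module path and network access for the tokenizer download.
#
#     from transformers import AutoTokenizer, BlenderbotSmallConfig
#     from transformers.models.blenderbot_small.configuration_blenderbot_small import BlenderbotSmallOnnxConfig
#
#     config = BlenderbotSmallConfig()
#     onnx_config = BlenderbotSmallOnnxConfig(config, task="default")
#     tokenizer = AutoTokenizer.from_pretrained("facebook/blenderbot_small-90M")
#     dummy_inputs = onnx_config.generate_dummy_inputs(tokenizer)
#     # expect: attention_mask, decoder_attention_mask, decoder_input_ids, input_ids
#     print(sorted(dummy_inputs))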
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_flax_available,
    is_tf_available,
    is_torch_available,
)


_import_structure = {
    "configuration_vision_encoder_decoder": ["VisionEncoderDecoderConfig", "VisionEncoderDecoderOnnxConfig"]
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_vision_encoder_decoder"] = ["VisionEncoderDecoderModel"]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_vision_encoder_decoder"] = ["TFVisionEncoderDecoderModel"]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_flax_vision_encoder_decoder"] = ["FlaxVisionEncoderDecoderModel"]

if TYPE_CHECKING:
    from .configuration_vision_encoder_decoder import VisionEncoderDecoderConfig, VisionEncoderDecoderOnnxConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_vision_encoder_decoder import VisionEncoderDecoderModel

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_vision_encoder_decoder import TFVisionEncoderDecoderModel

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_vision_encoder_decoder import FlaxVisionEncoderDecoderModel

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
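# The _LazyModule indirection above keeps importing the package cheap: the
# framework-specific submodules (and hence torch/tf/flax) are only imported
# when one of their attributes is first touched. A hypothetical illustration:
#
#     import transformers.models.vision_encoder_decoder as ved
#
#     # No torch import has happened yet; this attribute access triggers the
#     # lazy import of modeling_vision_encoder_decoder on demand.
#     model_cls = ved.VisionEncoderDecoderModel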
from math import factorial, radians


def sin(angle_in_degrees: float, accuracy: int = 18, rounded_values_count: int = 10) -> float:
    """
    Approximate sin for an angle given in degrees, using the Maclaurin series
    sin(x) = x - x**3/3! + x**5/5! - x**7/7! + ...

    >>> sin(0)
    0.0
    >>> sin(90)
    1.0
    """
    # Simplify the angle to be between 360 and -360 degrees
    angle_in_degrees = angle_in_degrees - ((angle_in_degrees // 360.0) * 360.0)
    # Converting from degrees to radians
    angle_in_radians = radians(angle_in_degrees)

    result = angle_in_radians
    a = 3
    b = -1
    for _ in range(accuracy):
        result += (b * (angle_in_radians**a)) / factorial(a)
        b = -b  # One positive term and the next will be negative and so on...
        a += 2  # Increased by 2 for every term.

    return round(result, rounded_values_count)


if __name__ == "__main__":
    __import__("doctest").testmod()
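# A quick cross-check against the standard library (a sketch; the rounding in
# sin() means both sides should agree to the 10 decimal places used):
#
#     import math
#
#     assert sin(30) == round(math.sin(math.radians(30)), 10)  # 0.5
#     assert sin(90) == round(math.sin(math.radians(90)), 10)  # 1.0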
from ... import PretrainedConfig


NEZHA_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "sijunhe/nezha-cn-base": "https://huggingface.co/sijunhe/nezha-cn-base/resolve/main/config.json",
}


class NezhaConfig(PretrainedConfig):
    pretrained_config_archive_map = NEZHA_PRETRAINED_CONFIG_ARCHIVE_MAP
    model_type = "nezha"

    def __init__(
        self, vocab_size=21128, hidden_size=768, num_hidden_layers=12, num_attention_heads=12,
        intermediate_size=3072, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1,
        max_position_embeddings=512, max_relative_position=64, type_vocab_size=2, initializer_range=0.02,
        layer_norm_eps=1e-12, classifier_dropout=0.1, pad_token_id=0, bos_token_id=2, eos_token_id=3,
        use_cache=True, **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.max_relative_position = max_relative_position
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.classifier_dropout = classifier_dropout
        self.use_cache = use_cache
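# A minimal instantiation sketch (assumes a transformers version that still
# includes the Nezha model family):
#
#     config = NezhaConfig()
#     print(config.model_type, config.hidden_size, config.max_relative_position)  # nezha 768 64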
# Precomputed denoising timestep schedules. The values appear to match the
# DeepFloyd IF schedules shipped with diffusers, so the variable names below
# follow that file; they are a best-effort reconstruction of the obfuscated
# originals.
fast27_timesteps = [
    999, 800, 799, 600, 599, 500, 400, 399, 377, 355, 333, 311, 288, 266,
    244, 222, 200, 199, 177, 155, 133, 111, 88, 66, 44, 22, 0,
]

smart27_timesteps = [
    999, 976, 952, 928, 905, 882, 858, 857, 810, 762, 715, 714, 572, 429,
    428, 286, 285, 238, 190, 143, 142, 118, 95, 71, 47, 24, 0,
]

smart50_timesteps = [
    999, 988, 977, 966, 955, 944, 933, 922, 911, 900, 899, 879, 859, 840,
    820, 800, 799, 766, 733, 700, 699, 650, 600, 599, 500, 499, 400, 399,
    350, 300, 299, 266, 233, 200, 199, 179, 159, 140, 120, 100, 99, 88,
    77, 66, 55, 44, 33, 22, 11, 0,
]

smart100_timesteps = [
    999, 995, 992, 989, 985, 981, 978, 975, 971, 967, 964, 961, 957, 956,
    951, 947, 942, 937, 933, 928, 923, 919, 914, 913, 908, 903, 897, 892,
    887, 881, 876, 871, 870, 864, 858, 852, 846, 840, 834, 828, 827, 820,
    813, 806, 799, 792, 785, 784, 777, 770, 763, 756, 749, 742, 741, 733,
    724, 716, 707, 699, 698, 688, 677, 666, 656, 655, 645, 634, 623, 613,
    612, 598, 584, 570, 569, 555, 541, 527, 526, 505, 484, 483, 462, 440,
    439, 396, 395, 352, 351, 308, 307, 264, 263, 220, 219, 176, 132, 88,
    44, 0,
]

smart185_timesteps = [
    999, 997, 995, 992, 990, 988, 986, 984, 981, 979, 977, 975, 972, 970,
    968, 966, 964, 961, 959, 957, 956, 954, 951, 949, 946, 944, 941, 939,
    936, 934, 931, 929, 926, 924, 921, 919, 916, 914, 913, 910, 907, 905,
    902, 899, 896, 893, 891, 888, 885, 882, 879, 877, 874, 871, 870, 867,
    864, 861, 858, 855, 852, 849, 846, 843, 840, 837, 834, 831, 828, 827,
    824, 821, 817, 814, 811, 808, 804, 801, 798, 795, 791, 788, 785, 784,
    780, 777, 774, 770, 766, 763, 760, 756, 752, 749, 746, 742, 741, 737,
    733, 730, 726, 722, 718, 714, 710, 707, 703, 699, 698, 694, 690, 685,
    681, 677, 673, 669, 664, 660, 656, 655, 650, 646, 641, 636, 632, 627,
    622, 618, 613, 612, 607, 602, 596, 591, 586, 580, 575, 570, 569, 563,
    557, 551, 545, 539, 533, 527, 526, 519, 512, 505, 498, 491, 484, 483,
    474, 466, 457, 449, 440, 439, 428, 418, 407, 396, 395, 381, 366, 352,
    351, 330, 308, 307, 286, 264, 263, 242, 220, 219, 176, 175, 132, 131,
    88, 44, 0,
]

super27_timesteps = [
    999, 991, 982, 974, 966, 958, 950, 941, 933, 925, 916, 908, 900, 899,
    874, 850, 825, 800, 799, 700, 600, 500, 400, 300, 200, 100, 0,
]

super40_timesteps = [
    999, 992, 985, 978, 971, 964, 957, 949, 942, 935, 928, 921, 914, 907,
    900, 899, 879, 859, 840, 820, 800, 799, 766, 733, 700, 699, 650, 600,
    599, 500, 499, 400, 399, 300, 299, 200, 199, 100, 99, 0,
]

super100_timesteps = [
    999, 996, 992, 989, 985, 982, 979, 975, 972, 968, 965, 961, 958, 955,
    951, 948, 944, 941, 938, 934, 931, 927, 924, 920, 917, 914, 910, 907,
    903, 900, 899, 891, 884, 876, 869, 861, 853, 846, 838, 830, 823, 815,
    808, 800, 799, 788, 777, 766, 755, 744, 733, 722, 711, 700, 699, 688,
    677, 666, 655, 644, 633, 622, 611, 600, 599, 585, 571, 557, 542, 528,
    514, 500, 499, 485, 471, 457, 442, 428, 414, 400, 399, 379, 359, 340,
    320, 300, 299, 279, 259, 240, 220, 200, 199, 166, 133, 100, 99, 66,
    33, 0,
]
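# A small helper sketch (hypothetical, not part of the original file) showing
# how a caller might select one of these schedules by its length:
#
#     _SCHEDULES = {
#         len(fast27_timesteps): fast27_timesteps,
#         len(smart50_timesteps): smart50_timesteps,
#         len(smart100_timesteps): smart100_timesteps,
#         len(smart185_timesteps): smart185_timesteps,
#     }
#
#     def pick_schedule(num_inference_steps: int) -> list[int]:
#         try:
#             return _SCHEDULES[num_inference_steps]
#         except KeyError:
#             raise ValueError(f"no precomputed schedule with {num_inference_steps} steps")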
"""simple docstring"""
def _lowerCamelCase( lowercase__ ) -> list[int]:
'''simple docstring'''
__lowercase= len(lowercase__ )
for i in range(lowercase__ ):
for j in range(i + 1 , lowercase__ ):
if numbers[j] < numbers[i]:
__lowercase, __lowercase= numbers[j], numbers[i]
return numbers
if __name__ == "__main__":
lowerCAmelCase = input('''Enter numbers separated by a comma:\n''').strip()
lowerCAmelCase = [int(item) for item in user_input.split(''',''')]
print(exchange_sort(unsorted))
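# A quick property check against the built-in sort (exchange sort is O(n^2),
# so it is only sensible for small inputs):
#
#     import random
#
#     data = [random.randint(0, 100) for _ in range(20)]
#     assert exchange_sort(data.copy()) == sorted(data)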
from __future__ import annotations

import numpy as np


def relu(vector: list[float]) -> np.ndarray:
    """Apply the rectified linear unit, i.e. element-wise max(0, x)."""
    return np.maximum(0, vector)


if __name__ == "__main__":
    print(np.array(relu([-1, 0, 5])))  # --> [0 0 5]
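# Since np.maximum broadcasts, the same function accepts arrays of any shape;
# on a 2x2 matrix it should print something like:
#
#     print(relu(np.array([[-2.0, 3.0], [1.5, -0.5]])))
#     # [[0.  3. ]
#     #  [1.5 0. ]]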