"""simple docstring"""
from random import randint, random
def a ( __UpperCAmelCase : int , __UpperCAmelCase : int , __UpperCAmelCase : int , __UpperCAmelCase : bool = False , __UpperCAmelCase : bool = False , __UpperCAmelCase : int = 5 , ) -> list:
__magic_name__: Optional[int] = [[-1] * number_of_cells] # Create a highway without any car
__magic_name__: Any = 0
__magic_name__: str = max(__UpperCAmelCase , 0 )
while i < number_of_cells:
__magic_name__: Optional[Any] = (
randint(0 , __UpperCAmelCase ) if random_speed else initial_speed
) # Place the cars
i += (
randint(1 , max_speed * 2 ) if random_frequency else frequency
) # Arbitrary number, may need tuning
return highway
def a ( __UpperCAmelCase : list , __UpperCAmelCase : int ) -> int:
__magic_name__: str = 0
__magic_name__: Dict = highway_now[car_index + 1 :]
for cell in range(len(__UpperCAmelCase ) ): # May need a better name for this
if cells[cell] != -1: # If the cell is not empty then
return distance # we have the distance we wanted
distance += 1
# Here if the car is near the end of the highway
return distance + get_distance(__UpperCAmelCase , -1 )
def a ( __UpperCAmelCase : list , __UpperCAmelCase : float , __UpperCAmelCase : int ) -> list:
__magic_name__: List[Any] = len(__UpperCAmelCase )
# Beforce calculations, the highway is empty
__magic_name__: List[Any] = [-1] * number_of_cells
for car_index in range(__UpperCAmelCase ):
if highway_now[car_index] != -1:
# Add 1 to the current speed of the car and cap the speed
__magic_name__: List[str] = min(highway_now[car_index] + 1 , __UpperCAmelCase )
# Number of empty cell before the next car
__magic_name__: List[str] = get_distance(__UpperCAmelCase , __UpperCAmelCase ) - 1
# We can't have the car causing an accident
__magic_name__: Union[str, Any] = min(next_highway[car_index] , __UpperCAmelCase )
if random() < probability:
# Randomly, a driver will slow down
__magic_name__: Optional[int] = max(next_highway[car_index] - 1 , 0 )
return next_highway
def a ( __UpperCAmelCase : list , __UpperCAmelCase : int , __UpperCAmelCase : float , __UpperCAmelCase : int ) -> list:
__magic_name__: Dict = len(highway[0] )
for i in range(__UpperCAmelCase ):
__magic_name__: Tuple = update(highway[i] , __UpperCAmelCase , __UpperCAmelCase )
__magic_name__: List[str] = [-1] * number_of_cells
for car_index in range(__UpperCAmelCase ):
__magic_name__: str = next_speeds_calculated[car_index]
if speed != -1:
# Change the position based on the speed (with % to create the loop)
__magic_name__: List[Any] = (car_index + speed) % number_of_cells
# Commit the change of position
__magic_name__: Any = speed
highway.append(__UpperCAmelCase )
return highway
if __name__ == "__main__":
import doctest
doctest.testmod()
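

# Hedged usage sketch (added; not part of the original module). The parameter
# values below are illustrative assumptions:
if __name__ == "__main__":
    example_highway = construct_highway(number_of_cells=20, frequency=4, initial_speed=1)
    history = simulate(example_highway, number_of_update=5, probability=0.3, max_speed=5)
    for lane in history:
        print(lane)  # -1 marks an empty cell; other values are car speeds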
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from accelerate import PartialState
from accelerate.utils.operations import broadcast, gather, gather_object, pad_across_processes, reduce
def create_tensor(state):
    return (torch.arange(state.num_processes) + 1.0 + (state.num_processes * state.process_index)).to(state.device)


def test_gather(state):
    tensor = create_tensor(state)
    gathered_tensor = gather(tensor)
    assert gathered_tensor.tolist() == list(range(1, state.num_processes**2 + 1))


def test_gather_object(state):
    obj = [state.process_index]
    gathered_obj = gather_object(obj)
    assert len(gathered_obj) == state.num_processes, f"{gathered_obj}, {len(gathered_obj)} != {state.num_processes}"
    assert gathered_obj == list(range(state.num_processes)), f"{gathered_obj} != {list(range(state.num_processes))}"


def test_broadcast(state):
    tensor = create_tensor(state)
    broadcasted_tensor = broadcast(tensor)
    assert broadcasted_tensor.shape == torch.Size([state.num_processes])
    assert broadcasted_tensor.tolist() == list(range(1, state.num_processes + 1))


def test_pad_across_processes(state):
    # We need to pad the tensor with one more element if we are the main process
    # to ensure that we can pad
    if state.is_main_process:
        tensor = torch.arange(state.num_processes + 1).to(state.device)
    else:
        tensor = torch.arange(state.num_processes).to(state.device)
    padded_tensor = pad_across_processes(tensor)
    assert padded_tensor.shape == torch.Size([state.num_processes + 1])
    if not state.is_main_process:
        assert padded_tensor.tolist() == list(range(0, state.num_processes)) + [0]


def test_reduce_sum(state):
    # For now runs on only two processes
    if state.num_processes != 2:
        return
    tensor = create_tensor(state)
    reduced_tensor = reduce(tensor, "sum")
    truth_tensor = torch.tensor([4.0, 6]).to(state.device)
    assert torch.allclose(reduced_tensor, truth_tensor), f"{reduced_tensor} != {truth_tensor}"


def test_reduce_mean(state):
    # For now runs on only two processes
    if state.num_processes != 2:
        return
    tensor = create_tensor(state)
    reduced_tensor = reduce(tensor, "mean")
    truth_tensor = torch.tensor([2.0, 3]).to(state.device)
    assert torch.allclose(reduced_tensor, truth_tensor), f"{reduced_tensor} != {truth_tensor}"


def _mp_fn(index):
    # For xla_spawn (TPUs)
    main()


def main():
    state = PartialState()
    state.print(f"State: {state}")
    state.print("testing gather")
    test_gather(state)
    state.print("testing gather_object")
    test_gather_object(state)
    state.print("testing broadcast")
    test_broadcast(state)
    state.print("testing pad_across_processes")
    test_pad_across_processes(state)
    state.print("testing reduce_sum")
    test_reduce_sum(state)
    state.print("testing reduce_mean")
    test_reduce_mean(state)
if __name__ == "__main__":
main()
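

# Hedged usage note (added; not from the original script): operation tests like
# this one are meant to be launched on several processes at once, for example
#
#     accelerate launch test_ops.py --num_processes 2
#
# where "test_ops.py" stands for whatever this file is saved as; `accelerate
# launch` is the standard CLI entry point of the accelerate library.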
import flax.linen as nn
import jax
import jax.numpy as jnp


class FlaxUpsample2D(nn.Module):
    out_channels: int
    dtype: jnp.dtype = jnp.float32

    def setup(self):
        self.conv = nn.Conv(
            self.out_channels,
            kernel_size=(3, 3),
            strides=(1, 1),
            padding=((1, 1), (1, 1)),
            dtype=self.dtype,
        )

    def __call__(self, hidden_states):
        batch, height, width, channels = hidden_states.shape
        hidden_states = jax.image.resize(
            hidden_states,
            shape=(batch, height * 2, width * 2, channels),
            method="nearest",
        )
        hidden_states = self.conv(hidden_states)
        return hidden_states


class FlaxDownsample2D(nn.Module):
    out_channels: int
    dtype: jnp.dtype = jnp.float32

    def setup(self):
        self.conv = nn.Conv(
            self.out_channels,
            kernel_size=(3, 3),
            strides=(2, 2),
            padding=((1, 1), (1, 1)),
            dtype=self.dtype,
        )

    def __call__(self, hidden_states):
        # pad = ((0, 0), (0, 1), (0, 1), (0, 0))  # pad height and width dim
        # hidden_states = jnp.pad(hidden_states, pad_width=pad)
        hidden_states = self.conv(hidden_states)
        return hidden_states


class FlaxResnetBlock2D(nn.Module):
    in_channels: int
    out_channels: int = None
    dropout_prob: float = 0.0
    use_nin_shortcut: bool = None
    dtype: jnp.dtype = jnp.float32

    def setup(self):
        out_channels = self.in_channels if self.out_channels is None else self.out_channels

        self.norm1 = nn.GroupNorm(num_groups=32, epsilon=1e-5)
        self.conv1 = nn.Conv(
            out_channels,
            kernel_size=(3, 3),
            strides=(1, 1),
            padding=((1, 1), (1, 1)),
            dtype=self.dtype,
        )

        self.time_emb_proj = nn.Dense(out_channels, dtype=self.dtype)

        self.norm2 = nn.GroupNorm(num_groups=32, epsilon=1e-5)
        self.dropout = nn.Dropout(self.dropout_prob)
        self.conv2 = nn.Conv(
            out_channels,
            kernel_size=(3, 3),
            strides=(1, 1),
            padding=((1, 1), (1, 1)),
            dtype=self.dtype,
        )

        use_nin_shortcut = self.in_channels != out_channels if self.use_nin_shortcut is None else self.use_nin_shortcut

        self.conv_shortcut = None
        if use_nin_shortcut:
            self.conv_shortcut = nn.Conv(
                out_channels,
                kernel_size=(1, 1),
                strides=(1, 1),
                padding="VALID",
                dtype=self.dtype,
            )

    def __call__(self, hidden_states, temb, deterministic=True):
        residual = hidden_states
        hidden_states = self.norm1(hidden_states)
        hidden_states = nn.swish(hidden_states)
        hidden_states = self.conv1(hidden_states)

        temb = self.time_emb_proj(nn.swish(temb))
        temb = jnp.expand_dims(jnp.expand_dims(temb, 1), 1)
        hidden_states = hidden_states + temb

        hidden_states = self.norm2(hidden_states)
        hidden_states = nn.swish(hidden_states)
        hidden_states = self.dropout(hidden_states, deterministic)
        hidden_states = self.conv2(hidden_states)

        if self.conv_shortcut is not None:
            residual = self.conv_shortcut(residual)

        return hidden_states + residual
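

# Hedged usage sketch (added; not part of the original module). Shapes and the
# PRNG seed below are illustrative assumptions:
#
#     block = FlaxUpsample2D(out_channels=8)
#     x = jnp.ones((1, 16, 16, 8))  # NHWC layout, as __call__ assumes
#     params = block.init(jax.random.PRNGKey(0), x)
#     y = block.apply(params, x)  # -> shape (1, 32, 32, 8)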
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)


class TimmBackboneConfig(PretrainedConfig):
    model_type = "timm_backbone"

    def __init__(
        self,
        backbone=None,
        num_channels=3,
        features_only=True,
        use_pretrained_backbone=True,
        out_indices=None,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.backbone = backbone
        self.num_channels = num_channels
        self.features_only = features_only
        self.use_pretrained_backbone = use_pretrained_backbone
        self.use_timm_backbone = True
        self.out_indices = out_indices if out_indices is not None else (-1,)
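

# Hedged usage sketch (added; not part of the original file). "resnet50" is an
# illustrative timm backbone name, not a value taken from the source:
#
#     config = TimmBackboneConfig(backbone="resnet50", out_indices=(1, 2, 3, 4))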
import os
import shutil
import tempfile
import unittest

import numpy as np

from transformers import AutoTokenizer, BarkProcessor
from transformers.testing_utils import require_torch, slow


@require_torch
class BarkProcessorTest(unittest.TestCase):
    def setUp(self):
        self.checkpoint = "ylacombe/bark-small"
        self.tmpdirname = tempfile.mkdtemp()
        self.voice_preset = "en_speaker_1"
        self.input_string = "This is a test string"
        self.speaker_embeddings_dict_path = "speaker_embeddings_path.json"
        self.speaker_embeddings_directory = "speaker_embeddings"

    def get_tokenizer(self, **kwargs):
        return AutoTokenizer.from_pretrained(self.checkpoint, **kwargs)

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def test_save_load_pretrained_default(self):
        tokenizer = self.get_tokenizer()
        processor = BarkProcessor(tokenizer=tokenizer)

        processor.save_pretrained(self.tmpdirname)
        processor = BarkProcessor.from_pretrained(self.tmpdirname)

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer.get_vocab())

    @slow
    def test_save_load_pretrained_additional_features(self):
        processor = BarkProcessor.from_pretrained(
            pretrained_processor_name_or_path=self.checkpoint,
            speaker_embeddings_dict_path=self.speaker_embeddings_dict_path,
        )
        processor.save_pretrained(
            self.tmpdirname,
            speaker_embeddings_dict_path=self.speaker_embeddings_dict_path,
            speaker_embeddings_directory=self.speaker_embeddings_directory,
        )

        tokenizer_add_kwargs = self.get_tokenizer(bos_token="(BOS)", eos_token="(EOS)")
        processor = BarkProcessor.from_pretrained(
            self.tmpdirname,
            self.speaker_embeddings_dict_path,
            bos_token="(BOS)",
            eos_token="(EOS)",
        )

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())

    def test_speaker_embeddings(self):
        processor = BarkProcessor.from_pretrained(
            pretrained_processor_name_or_path=self.checkpoint,
            speaker_embeddings_dict_path=self.speaker_embeddings_dict_path,
        )
        seq_len = 35
        nb_codebooks_coarse = 2
        nb_codebooks_total = 8

        voice_preset = {
            "semantic_prompt": np.ones(seq_len),
            "coarse_prompt": np.ones((nb_codebooks_coarse, seq_len)),
            "fine_prompt": np.ones((nb_codebooks_total, seq_len)),
        }

        # test providing already loaded voice_preset
        inputs = processor(text=self.input_string, voice_preset=voice_preset)

        processed_voice_preset = inputs["history_prompt"]
        for key in voice_preset:
            self.assertListEqual(voice_preset[key].tolist(), processed_voice_preset.get(key, np.array([])).tolist())

        # test loading voice preset from npz file
        tmpfilename = os.path.join(self.tmpdirname, "file.npz")
        np.savez(tmpfilename, **voice_preset)
        inputs = processor(text=self.input_string, voice_preset=tmpfilename)
        processed_voice_preset = inputs["history_prompt"]

        for key in voice_preset:
            self.assertListEqual(voice_preset[key].tolist(), processed_voice_preset.get(key, np.array([])).tolist())

        # test loading voice preset from the hub
        inputs = processor(text=self.input_string, voice_preset=self.voice_preset)

    def test_tokenizer(self):
        tokenizer = self.get_tokenizer()
        processor = BarkProcessor(tokenizer=tokenizer)

        encoded_processor = processor(text=self.input_string)
        encoded_tok = tokenizer(
            self.input_string,
            padding="max_length",
            max_length=256,
            add_special_tokens=False,
            return_attention_mask=True,
            return_token_type_ids=False,
        )

        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key], encoded_processor[key].squeeze().tolist())
def infix_2_postfix(infix: str) -> str:
    stack = []
    post_fix = []
    priority = {
        "^": 3,
        "*": 2,
        "/": 2,
        "%": 2,
        "+": 1,
        "-": 1,
    }  # Priority of each operator
    print_width = len(infix) if (len(infix) > 7) else 7

    # Print table header for output
    print(
        "Symbol".center(8),
        "Stack".center(print_width),
        "Postfix".center(print_width),
        sep=" | ",
    )
    print("-" * (print_width * 3 + 7))

    for x in infix:
        if x.isalpha() or x.isdigit():
            post_fix.append(x)  # if x is Alphabet / Digit, add it to Postfix
        elif x == "(":
            stack.append(x)  # if x is "(" push to Stack
        elif x == ")":  # if x is ")" pop stack until "(" is encountered
            while stack[-1] != "(":
                post_fix.append(stack.pop())  # Pop stack & add the content to Postfix
            stack.pop()
        else:
            if len(stack) == 0:
                stack.append(x)  # If stack is empty, push x to stack
            else:  # while priority of x is not > priority of element in the stack
                while len(stack) > 0 and priority[x] <= priority[stack[-1]]:
                    post_fix.append(stack.pop())  # pop stack & add to Postfix
                stack.append(x)  # push x to stack
        print(
            x.center(8),
            ("".join(stack)).ljust(print_width),
            ("".join(post_fix)).ljust(print_width),
            sep=" | ",
        )  # Output in tabular format

    while len(stack) > 0:  # while stack is not empty
        post_fix.append(stack.pop())  # pop stack & add to Postfix
        print(
            " ".center(8),
            ("".join(stack)).ljust(print_width),
            ("".join(post_fix)).ljust(print_width),
            sep=" | ",
        )  # Output in tabular format

    return "".join(post_fix)  # return Postfix as str


def infix_2_prefix(infix: str) -> str:
    reversed_infix = list(infix[::-1])  # reverse the infix equation

    for i in range(len(reversed_infix)):
        if reversed_infix[i] == "(":
            reversed_infix[i] = ")"  # change "(" to ")"
        elif reversed_infix[i] == ")":
            reversed_infix[i] = "("  # change ")" to "("

    return (infix_2_postfix("".join(reversed_infix)))[
        ::-1
    ]  # call infix_2_postfix on Infix, return reverse of Postfix


if __name__ == "__main__":
    Infix = input("\nEnter an Infix Equation = ")  # Input an Infix equation
    Infix = "".join(Infix.split())  # Remove spaces from the input
    print("\n\t", Infix, "(Infix) -> ", infix_2_prefix(Infix), "(Prefix)")
from __future__ import annotations

DIRECTIONS = [
    [-1, 0],  # left
    [0, -1],  # down
    [1, 0],  # right
    [0, 1],  # up
]


def search(
    grid: list[list[int]],
    init: list[int],
    goal: list[int],
    cost: int,
    heuristic: list[list[int]],
) -> tuple[list[list[int]], list[list[int]]]:
    closed = [
        [0 for col in range(len(grid[0]))] for row in range(len(grid))
    ]  # the reference grid
    closed[init[0]][init[1]] = 1
    action = [
        [0 for col in range(len(grid[0]))] for row in range(len(grid))
    ]  # the action grid

    x = init[0]
    y = init[1]
    g = 0
    f = g + heuristic[x][y]  # cost from starting cell to destination cell
    cell = [[f, g, x, y]]

    found = False  # flag that is set when search is complete
    resign = False  # flag set if we can't find expand

    while not found and not resign:
        if len(cell) == 0:
            raise ValueError("Algorithm is unable to find solution")
        else:  # to choose the least costliest action so as to move closer to the goal
            cell.sort()
            cell.reverse()
            next_cell = cell.pop()
            x = next_cell[2]
            y = next_cell[3]
            g = next_cell[1]

            if x == goal[0] and y == goal[1]:
                found = True
            else:
                for i in range(len(DIRECTIONS)):  # to try out different valid actions
                    x2 = x + DIRECTIONS[i][0]
                    y2 = y + DIRECTIONS[i][1]
                    if x2 >= 0 and x2 < len(grid) and y2 >= 0 and y2 < len(grid[0]):
                        if closed[x2][y2] == 0 and grid[x2][y2] == 0:
                            g2 = g + cost
                            f2 = g2 + heuristic[x2][y2]
                            cell.append([f2, g2, x2, y2])
                            closed[x2][y2] = 1
                            action[x2][y2] = i

    invpath = []
    x = goal[0]
    y = goal[1]
    invpath.append([x, y])  # we get the reverse path from here
    while x != init[0] or y != init[1]:
        x2 = x - DIRECTIONS[action[x][y]][0]
        y2 = y - DIRECTIONS[action[x][y]][1]
        x = x2
        y = y2
        invpath.append([x, y])

    path = []
    for i in range(len(invpath)):
        path.append(invpath[len(invpath) - 1 - i])
    return path, action


if __name__ == "__main__":
    grid = [
        [0, 1, 0, 0, 0, 0],
        [0, 1, 0, 0, 0, 0],  # 0 are free path whereas 1's are obstacles
        [0, 1, 0, 0, 0, 0],
        [0, 1, 0, 0, 1, 0],
        [0, 0, 0, 0, 1, 0],
    ]

    init = [0, 0]
    # all coordinates are given in format [y,x]
    goal = [len(grid) - 1, len(grid[0]) - 1]
    cost = 1

    # the cost map which pushes the path closer to the goal
    heuristic = [[0 for row in range(len(grid[0]))] for col in range(len(grid))]
    for i in range(len(grid)):
        for j in range(len(grid[0])):
            heuristic[i][j] = abs(i - goal[0]) + abs(j - goal[1])
            if grid[i][j] == 1:
                # added extra penalty in the heuristic map
                heuristic[i][j] = 99

    path, action = search(grid, init, goal, cost, heuristic)

    print("ACTION MAP")
    for i in range(len(action)):
        print(action[i])

    for i in range(len(path)):
        print(path[i])
from __future__ import annotations


def decimal_to_fraction(decimal: int | float | str) -> tuple[int, int]:
    try:
        decimal = float(decimal)
    except ValueError:
        raise ValueError("Please enter a valid number")
    fractional_part = decimal - int(decimal)
    if fractional_part == 0:
        return int(decimal), 1
    else:
        number_of_frac_digits = len(str(decimal).split(".")[1])
        numerator = int(decimal * (10**number_of_frac_digits))
        denominator = 10**number_of_frac_digits
        divisor, dividend = denominator, numerator
        while True:
            remainder = dividend % divisor
            if remainder == 0:
                break
            dividend, divisor = divisor, remainder
        numerator, denominator = numerator / divisor, denominator / divisor
        return int(numerator), int(denominator)


if __name__ == "__main__":
    print(f"{decimal_to_fraction(2) = }")
    print(f"{decimal_to_fraction(89.0) = }")
    print(f"{decimal_to_fraction('67') = }")
    print(f"{decimal_to_fraction('45.0') = }")
    print(f"{decimal_to_fraction(1.5) = }")
    print(f"{decimal_to_fraction('6.25') = }")
    print(f"{decimal_to_fraction('78td') = }")
DIGITS_FIFTH_POWER = {str(digit): digit**5 for digit in range(10)}


def digits_fifth_powers_sum(number: int) -> int:
    return sum(DIGITS_FIFTH_POWER[digit] for digit in str(number))


def solution() -> int:
    return sum(
        number
        for number in range(1000, 1000000)
        if number == digits_fifth_powers_sum(number)
    )


if __name__ == "__main__":
    print(solution())
import datasets
import faiss
import numpy as np
import streamlit as st
import torch
from elasticsearch import Elasticsearch
from eli5_utils import (
    embed_questions_for_retrieval,
    make_qa_s2s_model,
    qa_s2s_generate,
    query_es_index,
    query_qa_dense_index,
)

import transformers
from transformers import AutoModel, AutoModelForSeq2SeqLM, AutoTokenizer
MODEL_TYPE = "bart"
LOAD_DENSE_INDEX = True


@st.cache(allow_output_mutation=True)
def load_models():
    if LOAD_DENSE_INDEX:
        qar_tokenizer = AutoTokenizer.from_pretrained("yjernite/retribert-base-uncased")
        qar_model = AutoModel.from_pretrained("yjernite/retribert-base-uncased").to("cuda:0")
        _ = qar_model.eval()
    else:
        qar_tokenizer, qar_model = (None, None)
    if MODEL_TYPE == "bart":
        sas_tokenizer = AutoTokenizer.from_pretrained("yjernite/bart_eli5")
        sas_model = AutoModelForSeq2SeqLM.from_pretrained("yjernite/bart_eli5").to("cuda:0")
        save_dict = torch.load("seq2seq_models/eli5_bart_model_blm_2.pth")
        sas_model.load_state_dict(save_dict["model"])
        _ = sas_model.eval()
    else:
        sas_tokenizer, sas_model = make_qa_s2s_model(
            model_name="t5-small", from_file="seq2seq_models/eli5_t5_model_1024_4.pth", device="cuda:0"
        )
    return (qar_tokenizer, qar_model, sas_tokenizer, sas_model)
@st.cache(allow_output_mutation=True)
def load_indexes():
    if LOAD_DENSE_INDEX:
        faiss_res = faiss.StandardGpuResources()
        wiki40b_passages = datasets.load_dataset(path="wiki_snippets", name="wiki40b_en_100_0")["train"]
        wiki40b_passage_reps = np.memmap(
            "wiki40b_passages_reps_32_l-8_h-768_b-512-512.dat",
            dtype="float32",
            mode="r",
            shape=(wiki40b_passages.num_rows, 128),
        )
        wiki40b_index_flat = faiss.IndexFlatIP(128)
        wiki40b_gpu_index_flat = faiss.index_cpu_to_gpu(faiss_res, 1, wiki40b_index_flat)
        wiki40b_gpu_index_flat.add(wiki40b_passage_reps)  # TODO fix for larger GPU
    else:
        wiki40b_passages, wiki40b_gpu_index_flat = (None, None)
    es_client = Elasticsearch([{"host": "localhost", "port": "9200"}])
    return (wiki40b_passages, wiki40b_gpu_index_flat, es_client)


@st.cache(allow_output_mutation=True)
def load_train_data():
    eli5 = datasets.load_dataset("eli5", name="LFQA_reddit")
    eli5_train = eli5["train_eli5"]
    eli5_train_q_reps = np.memmap(
        "eli5_questions_reps.dat", dtype="float32", mode="r", shape=(eli5_train.num_rows, 128)
    )
    eli5_train_q_index = faiss.IndexFlatIP(128)
    eli5_train_q_index.add(eli5_train_q_reps)
    return (eli5_train, eli5_train_q_index)


wiki40b_passages, wiki40b_gpu_index_flat, es_client = load_indexes()
qar_tokenizer, qar_model, sas_tokenizer, sas_model = load_models()
eli5_train, eli5_train_q_index = load_train_data()
def find_nearest_training(question, n_results=10):
    q_rep = embed_questions_for_retrieval([question], qar_tokenizer, qar_model)
    D, I = eli5_train_q_index.search(q_rep, n_results)
    nn_examples = [eli5_train[int(i)] for i in I[0]]
    return nn_examples


def make_support(question, source="wiki40b", method="dense", n_results=10):
    if source == "none":
        support_doc, hit_lst = (" <P> ".join(["" for _ in range(11)]).strip(), [])
    else:
        if method == "dense":
            support_doc, hit_lst = query_qa_dense_index(
                question, qar_model, qar_tokenizer, wiki40b_passages, wiki40b_gpu_index_flat, n_results
            )
        else:
            support_doc, hit_lst = query_es_index(
                question,
                es_client,
                index_name="english_wiki40b_snippets_100w",
                n_results=n_results,
            )
    support_list = [
        (res["article_title"], res["section_title"].strip(), res["score"], res["passage_text"]) for res in hit_lst
    ]
    question_doc = "question: {} context: {}".format(question, support_doc)
    return question_doc, support_list


@st.cache(
    hash_funcs={
        torch.Tensor: (lambda _: None),
        transformers.models.bart.tokenization_bart.BartTokenizer: (lambda _: None),
    }
)
def answer_question(
    question_doc, sas_model, sas_tokenizer, min_len=64, max_len=256, sampling=False, n_beams=2, top_p=0.95, temp=0.8
):
    with torch.no_grad():
        answer = qa_s2s_generate(
            question_doc,
            sas_model,
            sas_tokenizer,
            num_answers=1,
            num_beams=n_beams,
            min_len=min_len,
            max_len=max_len,
            do_sample=sampling,
            temp=temp,
            top_p=top_p,
            top_k=None,
            max_input_length=1024,
            device="cuda:0",
        )[0]
    return (answer, support_list)
st.title("Long Form Question Answering with ELI5")

# Start sidebar
header_html = "<img src='https://huggingface.co/front/assets/huggingface_logo.svg'>"
header_full = """
<html>
  <head>
    <style>
      .img-container {
        padding-left: 90px;
        padding-right: 90px;
        padding-top: 50px;
        padding-bottom: 50px;
        background-color: #f0f3f9;
      }
    </style>
  </head>
  <body>
    <span class="img-container"> <!-- Inline parent element -->
      %s
    </span>
  </body>
</html>
""" % (
    header_html,
)
st.sidebar.markdown(
    header_full,
    unsafe_allow_html=True,
)
# Long Form QA with ELI5 and Wikipedia
description = """
This demo presents a model trained to [provide long-form answers to open-domain questions](https://yjernite.github.io/lfqa.html).
First, a document retriever fetches a set of relevant Wikipedia passages given the question from the [Wiki40b](https://research.google/pubs/pub49029/) dataset,
a pre-processed fixed snapshot of Wikipedia.
"""
st.sidebar.markdown(description, unsafe_allow_html=True)
action_list = [
    "Answer the question",
    "View the retrieved document only",
    "View the most similar ELI5 question and answer",
    "Show me everything, please!",
]
demo_options = st.sidebar.checkbox("Demo options")
if demo_options:
    action_st = st.sidebar.selectbox(
        "",
        action_list,
        index=3,
    )
    action = action_list.index(action_st)
    show_type = st.sidebar.selectbox(
        "",
        ["Show full text of passages", "Show passage section titles"],
        index=0,
    )
    show_passages = show_type == "Show full text of passages"
else:
    action = 3
    show_passages = True
retrieval_options = st.sidebar.checkbox("Retrieval options")
if retrieval_options:
    retriever_info = """
    ### Information retriever options

    The **sparse** retriever uses ElasticSearch, while the **dense** retriever uses max-inner-product search between a question and passage embedding
    trained using the [ELI5](https://arxiv.org/abs/1907.09190) questions-answer pairs.
    The answer is then generated by sequence to sequence model which takes the question and retrieved document as input.
    """
    st.sidebar.markdown(retriever_info)
    wiki_source = st.sidebar.selectbox("Which Wikipedia format should the model use?", ["wiki40b", "none"])
    index_type = st.sidebar.selectbox("Which Wikipedia indexer should the model use?", ["dense", "sparse", "mixed"])
else:
    wiki_source = "wiki40b"
    index_type = "dense"

sampled = "beam"
n_beams = 2
min_len = 64
max_len = 256
top_p = None
temp = None
generate_options = st.sidebar.checkbox("Generation options")
if generate_options:
    generate_info = """
    ### Answer generation options

    The sequence-to-sequence model was initialized with [BART](https://huggingface.co/facebook/bart-large)
    weights and fine-tuned on the ELI5 QA pairs and retrieved documents. You can use the model for greedy decoding with
    **beam** search, or **sample** from the decoder's output probabilities.
    """
    st.sidebar.markdown(generate_info)
    sampled = st.sidebar.selectbox("Would you like to use beam search or sample an answer?", ["beam", "sampled"])
    min_len = st.sidebar.slider(
        "Minimum generation length", min_value=8, max_value=256, value=64, step=8, format=None, key=None
    )
    max_len = st.sidebar.slider(
        "Maximum generation length", min_value=64, max_value=512, value=256, step=16, format=None, key=None
    )
    if sampled == "beam":
        n_beams = st.sidebar.slider("Beam size", min_value=1, max_value=8, value=2, step=None, format=None, key=None)
    else:
        top_p = st.sidebar.slider(
            "Nucleus sampling p", min_value=0.1, max_value=1.0, value=0.95, step=0.01, format=None, key=None
        )
        temp = st.sidebar.slider(
            "Temperature", min_value=0.1, max_value=1.0, value=0.7, step=0.01, format=None, key=None
        )
        n_beams = None
# start main text
questions_list = [
    "<MY QUESTION>",
    "How do people make chocolate?",
    "Why do we get a fever when we are sick?",
    "How can different animals perceive different colors?",
    "What is natural language processing?",
    "What's the best way to treat a sunburn?",
    "What exactly are vitamins ?",
    "How does nuclear energy provide electricity?",
    "What's the difference between viruses and bacteria?",
    "Why are flutes classified as woodwinds when most of them are made out of metal ?",
    "Why do people like drinking coffee even though it tastes so bad?",
    "What happens when wine ages? How does it make the wine taste better?",
    "If an animal is an herbivore, where does it get the protein that it needs to survive if it only eats grass?",
    "How can we set a date to the beginning or end of an artistic period? Doesn't the change happen gradually?",
    "How does New Zealand have so many large bird predators?",
]
question_s = st.selectbox(
    "What would you like to ask? ---- select <MY QUESTION> to enter a new query",
    questions_list,
    index=1,
)
if question_s == "<MY QUESTION>":
    question = st.text_input("Enter your question here:", "")
else:
    question = question_s
if st.button("Show me!"):
    if action in [0, 1, 3]:
        if index_type == "mixed":
            _, support_list_dense = make_support(question, source=wiki_source, method="dense", n_results=10)
            _, support_list_sparse = make_support(question, source=wiki_source, method="sparse", n_results=10)
            support_list = []
            for res_d, res_s in zip(support_list_dense, support_list_sparse):
                if tuple(res_d) not in support_list:
                    support_list += [tuple(res_d)]
                if tuple(res_s) not in support_list:
                    support_list += [tuple(res_s)]
            support_list = support_list[:10]
            question_doc = "<P> " + " <P> ".join([res[-1] for res in support_list])
        else:
            question_doc, support_list = make_support(question, source=wiki_source, method=index_type, n_results=10)
    if action in [0, 3]:
        answer, support_list = answer_question(
            question_doc,
            sas_model,
            sas_tokenizer,
            min_len=min_len,
            max_len=int(max_len),
            sampling=(sampled == "sampled"),
            n_beams=n_beams,
            top_p=top_p,
            temp=temp,
        )
        st.markdown("### The model generated answer is:")
        st.write(answer)
    if action in [0, 1, 3] and wiki_source != "none":
        st.markdown("--- \n ### The model is drawing information from the following Wikipedia passages:")
        for i, res in enumerate(support_list):
            wiki_url = "https://en.wikipedia.org/wiki/{}".format(res[0].replace(" ", "_"))
            sec_titles = res[1].strip()
            if sec_titles == "":
                sections = "[{}]({})".format(res[0], wiki_url)
            else:
                sec_list = sec_titles.split(" & ")
                sections = " & ".join(
                    ["[{}]({}#{})".format(sec.strip(), wiki_url, sec.strip().replace(" ", "_")) for sec in sec_list]
                )
            st.markdown(
                "{0:02d} - **Article**: {1:<18} <br>  _Section_: {2}".format(i + 1, res[0], sections),
                unsafe_allow_html=True,
            )
            if show_passages:
                st.write(
                    '> <span style="font-family:arial; font-size:10pt;">' + res[-1] + "</span>", unsafe_allow_html=True
                )
    if action in [2, 3]:
        nn_train_list = find_nearest_training(question)
        train_exple = nn_train_list[0]
        st.markdown(
            "--- \n ### The most similar question in the ELI5 training set was: \n\n {}".format(train_exple["title"])
        )
        answers_st = [
            "{}. {}".format(i + 1, " \n".join([line.strip() for line in ans.split("\n") if line.strip() != ""]))
            for i, (ans, sc) in enumerate(zip(train_exple["answers"]["text"], train_exple["answers"]["score"]))
            if i == 0 or sc > 2
        ]
        st.markdown("##### Its answers were: \n\n {}".format("\n".join(answers_st)))
disclaimer = """
---

**Disclaimer**

*The intent of this app is to provide some (hopefully entertaining) insights into the behavior of a current LFQA system.
Evaluating biases of such a model and ensuring factual generations are still very much open research problems.
Therefore, until some significant progress is achieved, we caution against using the generated answers for practical purposes.*
"""
st.sidebar.markdown(disclaimer, unsafe_allow_html=True)
"""Tokenization classes for RemBERT."""
import os
from shutil import copyfile
from typing import List, Optional, Tuple

import sentencepiece as spm

from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging


logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.model"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "google/rembert": "https://huggingface.co/google/rembert/resolve/main/sentencepiece.model",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "google/rembert": 256,
}


class RemBertTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES

    def __init__(
        self,
        vocab_file,
        do_lower_case=False,
        remove_space=True,
        keep_accents=True,
        bos_token="[CLS]",
        eos_token="[SEP]",
        unk_token="[UNK]",
        sep_token="[SEP]",
        pad_token="[PAD]",
        cls_token="[CLS]",
        mask_token="[MASK]",
        **kwargs,
    ):
        super().__init__(
            do_lower_case=do_lower_case,
            remove_space=remove_space,
            keep_accents=keep_accents,
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            **kwargs,
        )

        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.keep_accents = keep_accents
        self.vocab_file = vocab_file

        self.sp_model = spm.SentencePieceProcessor()
        self.sp_model.Load(vocab_file)

    @property
    def vocab_size(self):
        return len(self.sp_model)

    def get_vocab(self):
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d):
        self.__dict__ = d
        self.sp_model = spm.SentencePieceProcessor()
        self.sp_model.Load(self.vocab_file)

    def _tokenize(self, text, sample=False):
        pieces = self.sp_model.EncodeAsPieces(text)
        return pieces

    def _convert_token_to_id(self, token):
        return self.sp_model.PieceToId(token)

    def _convert_id_to_token(self, index):
        return self.sp_model.IdToPiece(index)

    def convert_tokens_to_string(self, tokens):
        out_string = self.sp_model.decode_pieces(tokens)
        return out_string

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return cls + token_ids_0 + sep
        return cls + token_ids_0 + sep + token_ids_1 + sep

    def get_special_tokens_mask(self, token_ids_0, token_ids_1=None, already_has_special_tokens=False):
        if already_has_special_tokens:
            if token_ids_1 is not None:
                raise ValueError(
                    "You should not supply a second sequence if the provided sequence of "
                    "ids is already formatted with special tokens for the model."
                )
            return [1 if x in [self.sep_token_id, self.cls_token_id] else 0 for x in token_ids_0]

        if token_ids_1 is not None:
            return [1] + ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1]

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error("Vocabulary path ({}) should be a directory".format(save_directory))
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        return (out_vocab_file,)
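

# Hedged usage sketch (added; not part of the original file), loading the
# checkpoint referenced in the URL map above:
#
#     tokenizer = RemBertTokenizer.from_pretrained("google/rembert")
#     input_ids = tokenizer("Hello world")["input_ids"]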
"""Fast Tokenization classes for RoBERTa."""
import json
from typing import List, Optional, Tuple

from tokenizers import pre_tokenizers, processors

from ...tokenization_utils_base import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_roberta import RobertaTokenizer


logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "roberta-base": "https://huggingface.co/roberta-base/resolve/main/vocab.json",
        "roberta-large": "https://huggingface.co/roberta-large/resolve/main/vocab.json",
        "roberta-large-mnli": "https://huggingface.co/roberta-large-mnli/resolve/main/vocab.json",
        "distilroberta-base": "https://huggingface.co/distilroberta-base/resolve/main/vocab.json",
        "roberta-base-openai-detector": "https://huggingface.co/roberta-base-openai-detector/resolve/main/vocab.json",
        "roberta-large-openai-detector": (
            "https://huggingface.co/roberta-large-openai-detector/resolve/main/vocab.json"
        ),
    },
    "merges_file": {
        "roberta-base": "https://huggingface.co/roberta-base/resolve/main/merges.txt",
        "roberta-large": "https://huggingface.co/roberta-large/resolve/main/merges.txt",
        "roberta-large-mnli": "https://huggingface.co/roberta-large-mnli/resolve/main/merges.txt",
        "distilroberta-base": "https://huggingface.co/distilroberta-base/resolve/main/merges.txt",
        "roberta-base-openai-detector": "https://huggingface.co/roberta-base-openai-detector/resolve/main/merges.txt",
        "roberta-large-openai-detector": (
            "https://huggingface.co/roberta-large-openai-detector/resolve/main/merges.txt"
        ),
    },
    "tokenizer_file": {
        "roberta-base": "https://huggingface.co/roberta-base/resolve/main/tokenizer.json",
        "roberta-large": "https://huggingface.co/roberta-large/resolve/main/tokenizer.json",
        "roberta-large-mnli": "https://huggingface.co/roberta-large-mnli/resolve/main/tokenizer.json",
        "distilroberta-base": "https://huggingface.co/distilroberta-base/resolve/main/tokenizer.json",
        "roberta-base-openai-detector": (
            "https://huggingface.co/roberta-base-openai-detector/resolve/main/tokenizer.json"
        ),
        "roberta-large-openai-detector": (
            "https://huggingface.co/roberta-large-openai-detector/resolve/main/tokenizer.json"
        ),
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "roberta-base": 512,
    "roberta-large": 512,
    "roberta-large-mnli": 512,
    "distilroberta-base": 512,
    "roberta-base-openai-detector": 512,
    "roberta-large-openai-detector": 512,
}


class RobertaTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = RobertaTokenizer

    def __init__(
        self,
        vocab_file=None,
        merges_file=None,
        tokenizer_file=None,
        errors="replace",
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        add_prefix_space=False,
        trim_offsets=True,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            merges_file,
            tokenizer_file=tokenizer_file,
            errors=errors,
            bos_token=bos_token,
            eos_token=eos_token,
            sep_token=sep_token,
            cls_token=cls_token,
            unk_token=unk_token,
            pad_token=pad_token,
            mask_token=mask_token,
            add_prefix_space=add_prefix_space,
            trim_offsets=trim_offsets,
            **kwargs,
        )

        pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__())
        if pre_tok_state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
            pre_tok_class = getattr(pre_tokenizers, pre_tok_state.pop("type"))
            pre_tok_state["add_prefix_space"] = add_prefix_space
            self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state)

        self.add_prefix_space = add_prefix_space

        tokenizer_component = "post_processor"
        tokenizer_component_instance = getattr(self.backend_tokenizer, tokenizer_component, None)
        if tokenizer_component_instance:
            state = json.loads(tokenizer_component_instance.__getstate__())

            # The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class`
            if "sep" in state:
                state["sep"] = tuple(state["sep"])
            if "cls" in state:
                state["cls"] = tuple(state["cls"])

            changes_to_apply = False

            if state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
                state["add_prefix_space"] = add_prefix_space
                changes_to_apply = True

            if state.get("trim_offsets", trim_offsets) != trim_offsets:
                state["trim_offsets"] = trim_offsets
                changes_to_apply = True

            if changes_to_apply:
                component_class = getattr(processors, state.pop("type"))
                new_value = component_class(**state)
                setattr(self.backend_tokenizer, tokenizer_component, new_value)

    @property
    def mask_token(self) -> str:
        if self._mask_token is None:
            if self.verbose:
                logger.error("Using mask_token, but it is not set yet.")
            return None
        return str(self._mask_token)

    @mask_token.setter
    def mask_token(self, value):
        # Mask token behaves like a normal word, i.e. includes the space before it
        value = AddedToken(value, lstrip=True, rstrip=False) if isinstance(value, str) else value
        self._mask_token = value

    def _batch_encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)
        assert self.add_prefix_space or not is_split_into_words, (
            f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
            "to use it with pretokenized inputs."
        )
        return super()._batch_encode_plus(*args, **kwargs)

    def _encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)
        assert self.add_prefix_space or not is_split_into_words, (
            f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
            "to use it with pretokenized inputs."
        )
        return super()._encode_plus(*args, **kwargs)

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.bos_token_id] + token_ids_0 + [self.eos_token_id]
        if token_ids_1 is None:
            return output
        return output + [self.eos_token_id] + token_ids_1 + [self.eos_token_id]

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]
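

# Hedged usage sketch (added; not part of the original file):
#
#     tokenizer = RobertaTokenizerFast.from_pretrained("roberta-base")
#     encoding = tokenizer("Hello world", return_offsets_mapping=True)
#     # with trim_offsets=True (the default above) the reported offsets skip
#     # the leading space that byte-level BPE attaches to each word.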
from __future__ import annotations

from collections import deque


class Automaton:
    def __init__(self, keywords: list[str]) -> None:
        self.adlist: list[dict] = []
        self.adlist.append(
            {"value": "", "next_states": [], "fail_state": 0, "output": []}
        )

        for keyword in keywords:
            self.add_keyword(keyword)
        self.set_fail_transitions()

    def find_next_state(self, current_state: int, char: str) -> int | None:
        for state in self.adlist[current_state]["next_states"]:
            if char == self.adlist[state]["value"]:
                return state
        return None

    def add_keyword(self, keyword: str) -> None:
        current_state = 0
        for character in keyword:
            next_state = self.find_next_state(current_state, character)
            if next_state is None:
                self.adlist.append(
                    {
                        "value": character,
                        "next_states": [],
                        "fail_state": 0,
                        "output": [],
                    }
                )
                self.adlist[current_state]["next_states"].append(len(self.adlist) - 1)
                current_state = len(self.adlist) - 1
            else:
                current_state = next_state
        self.adlist[current_state]["output"].append(keyword)

    def set_fail_transitions(self) -> None:
        q: deque = deque()
        for node in self.adlist[0]["next_states"]:
            q.append(node)
            self.adlist[node]["fail_state"] = 0
        while q:
            r = q.popleft()
            for child in self.adlist[r]["next_states"]:
                q.append(child)
                state = self.adlist[r]["fail_state"]
                while (
                    self.find_next_state(state, self.adlist[child]["value"]) is None
                    and state != 0
                ):
                    state = self.adlist[state]["fail_state"]
                self.adlist[child]["fail_state"] = self.find_next_state(
                    state, self.adlist[child]["value"]
                )
                if self.adlist[child]["fail_state"] is None:
                    self.adlist[child]["fail_state"] = 0
                self.adlist[child]["output"] = (
                    self.adlist[child]["output"]
                    + self.adlist[self.adlist[child]["fail_state"]]["output"]
                )

    def search_in(self, string: str) -> dict[str, list[int]]:
        result: dict = {}  # returns a dict with keywords and list of their occurrences
        current_state = 0
        for i in range(len(string)):
            while (
                self.find_next_state(current_state, string[i]) is None
                and current_state != 0
            ):
                current_state = self.adlist[current_state]["fail_state"]
            next_state = self.find_next_state(current_state, string[i])
            if next_state is None:
                current_state = 0
            else:
                current_state = next_state
                for key in self.adlist[current_state]["output"]:
                    if key not in result:
                        result[key] = []
                    result[key].append(i - len(key) + 1)
        return result
if __name__ == "__main__":
import doctest
doctest.testmod()
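

# Hedged usage sketch (added; the doctests the upstream file carries were
# stripped from this copy, so an equivalent example is given here):
if __name__ == "__main__":
    automaton = Automaton(["what", "hat", "ver", "er"])
    print(automaton.search_in("whatever, err ... , wherever"))
    # expected: {'what': [0], 'hat': [1], 'ver': [5, 25], 'er': [6, 10, 22, 26]}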
import math
from collections.abc import Callable


def intersection(function: Callable[[float], float], x0: float, x1: float) -> float:
    """Find a root of `function` via the secant method, starting from x0 and x1."""
    x_n: float = x0
    x_n1: float = x1
    while True:
        if x_n == x_n1 or function(x_n1) == function(x_n):
            raise ZeroDivisionError("float division by zero, could not find root")
        x_n2: float = x_n1 - (
            function(x_n1) / ((function(x_n1) - function(x_n)) / (x_n1 - x_n))
        )
        if abs(x_n2 - x_n1) < 10**-5:
            return x_n2
        x_n = x_n1
        x_n1 = x_n2


def f(x: float) -> float:
    return math.pow(x, 3) - (2 * x) - 5


if __name__ == "__main__":
    print(intersection(f, 3, 3.5))
"""VAN model configuration"""
from ....configuration_utils import PretrainedConfig
from ....utils import logging


logger = logging.get_logger(__name__)

VAN_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "Visual-Attention-Network/van-base": (
        "https://huggingface.co/Visual-Attention-Network/van-base/blob/main/config.json"
    ),
}


class VanConfig(PretrainedConfig):
    model_type = "van"

    def __init__(
        self,
        image_size=224,
        num_channels=3,
        patch_sizes=[7, 3, 3, 3],
        strides=[4, 2, 2, 2],
        hidden_sizes=[64, 128, 320, 512],
        depths=[3, 3, 12, 3],
        mlp_ratios=[8, 8, 4, 4],
        hidden_act="gelu",
        initializer_range=0.02,
        layer_norm_eps=1e-6,
        layer_scale_init_value=1e-2,
        drop_path_rate=0.0,
        dropout_rate=0.0,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.image_size = image_size
        self.num_channels = num_channels
        self.patch_sizes = patch_sizes
        self.strides = strides
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.mlp_ratios = mlp_ratios
        self.hidden_act = hidden_act
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.layer_scale_init_value = layer_scale_init_value
        self.drop_path_rate = drop_path_rate
        self.dropout_rate = dropout_rate
def min_path_sum(grid: list) -> int:
    if not grid or not grid[0]:
        raise TypeError("The grid does not contain the appropriate information")

    for cell_n in range(1, len(grid[0])):
        grid[0][cell_n] += grid[0][cell_n - 1]
    row_above = grid[0]

    for row_n in range(1, len(grid)):
        current_row = grid[row_n]
        grid[row_n] = fill_row(current_row, row_above)
        row_above = grid[row_n]

    return grid[-1][-1]


def fill_row(current_row: list, row_above: list) -> list:
    current_row[0] += row_above[0]
    for cell_n in range(1, len(current_row)):
        current_row[cell_n] += min(current_row[cell_n - 1], row_above[cell_n])
    return current_row
if __name__ == "__main__":
import doctest
doctest.testmod()
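

# Hedged usage sketch (added; not part of the original file):
if __name__ == "__main__":
    print(min_path_sum([[1, 3, 1], [1, 5, 1], [4, 2, 1]]))  # expected: 7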
import unittest

import numpy as np

from transformers.file_utils import is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_vision

from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs


if is_torch_available():
    import torch

if is_vision_available():
    from PIL import Image

    from transformers import DPTImageProcessor


class DPTImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_normalize=True,
        image_mean=[0.5, 0.5, 0.5],
        image_std=[0.5, 0.5, 0.5],
    ):
        size = size if size is not None else {"height": 18, "width": 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std

    def prepare_image_processor_dict(self):
        return {
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "do_normalize": self.do_normalize,
            "do_resize": self.do_resize,
            "size": self.size,
        }


@require_torch
@require_vision
class DPTImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = DPTImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = DPTImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"height": 18, "width": 18})

        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42)
        self.assertEqual(image_processor.size, {"height": 42, "width": 42})

    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )
import pprint

import requests

API_ENDPOINT_URL = "https://zenquotes.io/api"


def quote_of_the_day() -> list:
    return requests.get(API_ENDPOINT_URL + "/today").json()


def random_quotes() -> list:
    return requests.get(API_ENDPOINT_URL + "/random").json()


if __name__ == "__main__":
    response = random_quotes()
    pprint.pprint(response)
import math
import sys
import cva
import numpy as np
def vec_gaussian(img: np.ndarray, variance: float) -> np.ndarray:
    # Apply the gaussian function element-wise to a matrix.
    sigma = math.sqrt(variance)
    cons = 1 / (sigma * math.sqrt(2 * math.pi))
    return cons * np.exp(-((img / sigma) ** 2) * 0.5)


def get_slice(img: np.ndarray, x: int, y: int, kernel_size: int) -> np.ndarray:
    # Extract the square window of side kernel_size centred on (x, y).
    half = kernel_size // 2
    return img[x - half : x + half + 1, y - half : y + half + 1]


def get_gauss_kernel(kernel_size: int, spatial_variance: float) -> np.ndarray:
    # Build the spatial gaussian kernel from each cell's distance to the centre.
    arr = np.zeros((kernel_size, kernel_size))
    for i in range(0, kernel_size):
        for j in range(0, kernel_size):
            arr[i, j] = math.sqrt(
                abs(i - kernel_size // 2) ** 2 + abs(j - kernel_size // 2) ** 2
            )
    return vec_gaussian(arr, spatial_variance)


def bilateral_filter(
    img: np.ndarray,
    spatial_variance: float,
    intensity_variance: float,
    kernel_size: int,
) -> np.ndarray:
    img2 = np.zeros(img.shape)
    gauss_ker = get_gauss_kernel(kernel_size, spatial_variance)
    size_x, size_y = img.shape
    for i in range(kernel_size // 2, size_x - kernel_size // 2):
        for j in range(kernel_size // 2, size_y - kernel_size // 2):
            img_s = get_slice(img, i, j, kernel_size)
            img_i = img_s - img_s[kernel_size // 2, kernel_size // 2]
            img_ig = vec_gaussian(img_i, intensity_variance)
            weights = np.multiply(gauss_ker, img_ig)
            vals = np.multiply(img_s, weights)
            val = np.sum(vals) / np.sum(weights)
            img2[i, j] = val
    return img2


def parse_args(args: list) -> tuple:
    filename = args[1] if args[1:] else "../image_data/lena.jpg"
    spatial_variance = float(args[2]) if args[2:] else 1.0
    intensity_variance = float(args[3]) if args[3:] else 1.0
    if args[4:]:
        kernel_size = int(args[4])
        kernel_size = kernel_size + abs(kernel_size % 2 - 1)  # force an odd kernel size
    else:
        kernel_size = 5
    return filename, spatial_variance, intensity_variance, kernel_size
if __name__ == "__main__":
    filename, spatial_variance, intensity_variance, kernel_size = parse_args(sys.argv)
    img = cv2.imread(filename, 0)
    cv2.imshow("input image", img)

    out = img / 255
    out = out.astype("float32")
    out = bilateral_filter(out, spatial_variance, intensity_variance, kernel_size)
    out = out * 255
    out = np.uint8(out)
    cv2.imshow("output image", out)
    cv2.waitKey(0)
    cv2.destroyAllWindows()
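# Quick self-contained check (added; not part of the original script) — runs
# the filter on a synthetic noisy gradient instead of an image file:
#
#   >>> rng = np.random.default_rng(0)
#   >>> ramp = np.linspace(0, 1, 32 * 32).reshape(32, 32)
#   >>> noisy = (ramp + 0.1 * rng.standard_normal((32, 32))).astype("float32")
#   >>> filtered = bilateral_filter(noisy, 1.0, 1.0, 5)
#   >>> filtered.shape
#   (32, 32)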
| 715 |
import os
import time
import numpy as np
import onnxruntime as ort
a_ = "1"
a_ = "0"
a_ = "1"
a_ = ort.SessionOptions()
a_ = ort.GraphOptimizationLevel.ORT_DISABLE_ALL
print("Create inference session...")
a_ = ["TensorrtExecutionProvider", "CUDAExecutionProvider"]
a_ = ort.InferenceSession("model.onnx", sess_options=sess_opt, providers=execution_provider)
a_ = ort.RunOptions()
a_ = 128
a_ = 1
a_ = np.ones((batch, sequence), dtype=np.intaa)
a_ = np.ones((batch, sequence), dtype=np.intaa)
a_ = np.ones((batch, sequence), dtype=np.intaa)
print("Warm up phase...")
sess.run(
None,
{
sess.get_inputs()[0].name: input_ids,
sess.get_inputs()[1].name: attention_mask,
sess.get_inputs()[2].name: token_type_ids,
},
run_options=run_opt,
)
print("Start inference...")
start_time = time.time()
max_iters = 2000
results = {}
for _ in range(max_iters):
    results = sess.run(
        None,
        {
            sess.get_inputs()[0].name: input_ids,
            sess.get_inputs()[1].name: attention_mask,
            sess.get_inputs()[2].name: token_type_ids,
        },
        run_options=run_opt,
    )
print("Average Inference Time = {:.3f} ms".format((time.time() - start_time) * 1000 / max_iters))
| 375 | 0 |
def solution(n: int = 2_000_000) -> int:
    """Return the sum of all primes below n, using a sieve of Eratosthenes
    where primality_list[i] == 0 marks i as prime."""
    primality_list = [0 for i in range(n + 1)]
    primality_list[0] = 1
    primality_list[1] = 1
    for i in range(2, int(n**0.5) + 1):
        if primality_list[i] == 0:
            for j in range(i * i, n + 1, i):
                primality_list[j] = 1
    sum_of_primes = 0
    for i in range(n):
        if primality_list[i] == 0:
            sum_of_primes += i
    return sum_of_primes
if __name__ == "__main__":
print(f'''{solution() = }''')
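# Worked example (added): below 10 the primes are 2, 3, 5 and 7, so
#
#   >>> solution(10)
#   17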
| 64 |
def solution(limit: int = 50_000_000) -> int:
    """Count the numbers below limit expressible as the sum of a prime square,
    a prime cube and a prime fourth power (Project Euler problem 87)."""
    ret = set()
    prime_square_limit = int((limit - 24) ** (1 / 2))

    primes = set(range(3, prime_square_limit + 1, 2))
    primes.add(2)
    for p in range(3, prime_square_limit + 1, 2):
        if p not in primes:
            continue
        primes.difference_update(set(range(p * p, prime_square_limit + 1, p)))

    for prime1 in primes:
        square = prime1 * prime1
        for prime2 in primes:
            cube = prime2 * prime2 * prime2
            if square + cube >= limit - 16:
                break
            for prime3 in primes:
                tetr = prime3 * prime3 * prime3 * prime3
                total = square + cube + tetr
                if total >= limit:
                    break
                ret.add(total)

    return len(ret)
if __name__ == "__main__":
print(F'{solution() = }')
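# Worked example (added, per the Project Euler 87 statement): below 50 exactly
# four numbers qualify — 28 = 2^2 + 2^3 + 2^4, 33 = 3^2 + 2^3 + 2^4,
# 47 = 2^2 + 3^3 + 2^4 and 49 = 5^2 + 2^3 + 2^4 — so solution(50) == 4.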
| 413 | 0 |
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import Features, Sequence, Value
from .base import TaskTemplate
@dataclass(frozen=True)
class QuestionAnsweringExtractive(TaskTemplate):
    # `task` is not a ClassVar since we want it to be part of the `asdict` output for JSON serialization
    task: str = field(default="question-answering-extractive", metadata={"include_in_asdict_even_if_is_default": True})
    input_schema: ClassVar[Features] = Features({"question": Value("string"), "context": Value("string")})
    label_schema: ClassVar[Features] = Features(
        {
            "answers": Sequence(
                {
                    "text": Value("string"),
                    "answer_start": Value("int32"),
                }
            )
        }
    )
    question_column: str = "question"
    context_column: str = "context"
    answers_column: str = "answers"

    @property
    def column_mapping(self) -> Dict[str, str]:
        return {self.question_column: "question", self.context_column: "context", self.answers_column: "answers"}
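# Usage sketch (added; the column names below are illustrative): the template
# maps dataset-specific column names onto the canonical QA schema.
#
#   >>> template = QuestionAnsweringExtractive(
#   ...     question_column="query", context_column="passage", answers_column="spans"
#   ... )
#   >>> template.column_mapping
#   {'query': 'question', 'passage': 'context', 'spans': 'answers'}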
| 597 |
from pathlib import Path
import fire
def minify(src_path: str, dest_path: str, n: int):
    """Write the first n lines of each file f in src_path to dest_path/f."""
    src_dir = Path(src_path)
    dest_dir = Path(dest_path)
    dest_dir.mkdir(exist_ok=True)
    for path in src_dir.iterdir():
        new = [x.rstrip() for x in list(path.open().readlines())][:n]
        dest_path = dest_dir.joinpath(path.name)
        print(dest_path)
        dest_path.open("w").write("\n".join(new))
if __name__ == "__main__":
fire.Fire(minify)
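# CLI usage sketch (added; the paths are hypothetical examples):
#
#   python minify.py data/full data/mini 100
#
# fire maps the three positional arguments onto minify(src_path, dest_path, n)
# and coerces "100" to an int automatically.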
| 597 | 1 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
_import_structure = {
'configuration_clip': [
'CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP',
'CLIPConfig',
'CLIPOnnxConfig',
'CLIPTextConfig',
'CLIPVisionConfig',
],
'processing_clip': ['CLIPProcessor'],
'tokenization_clip': ['CLIPTokenizer'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_clip_fast"] = ["CLIPTokenizerFast"]
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["feature_extraction_clip"] = ["CLIPFeatureExtractor"]
    _import_structure["image_processing_clip"] = ["CLIPImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_clip"] = [
'CLIP_PRETRAINED_MODEL_ARCHIVE_LIST',
'CLIPModel',
'CLIPPreTrainedModel',
'CLIPTextModel',
'CLIPTextModelWithProjection',
'CLIPVisionModel',
'CLIPVisionModelWithProjection',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_clip"] = [
'TF_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFCLIPModel',
'TFCLIPPreTrainedModel',
'TFCLIPTextModel',
'TFCLIPVisionModel',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_clip"] = [
'FlaxCLIPModel',
'FlaxCLIPPreTrainedModel',
'FlaxCLIPTextModel',
'FlaxCLIPTextPreTrainedModel',
'FlaxCLIPVisionModel',
'FlaxCLIPVisionPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_clip import (
CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
CLIPConfig,
CLIPOnnxConfig,
CLIPTextConfig,
CLIPVisionConfig,
)
from .processing_clip import CLIPProcessor
from .tokenization_clip import CLIPTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_clip_fast import CLIPTokenizerFast
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_clip import CLIPFeatureExtractor
from .image_processing_clip import CLIPImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_clip import (
CLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
CLIPModel,
CLIPPreTrainedModel,
CLIPTextModel,
CLIPTextModelWithProjection,
CLIPVisionModel,
CLIPVisionModelWithProjection,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_clip import (
TF_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
TFCLIPModel,
TFCLIPPreTrainedModel,
TFCLIPTextModel,
TFCLIPVisionModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_clip import (
FlaxCLIPModel,
FlaxCLIPPreTrainedModel,
FlaxCLIPTextModel,
FlaxCLIPTextPreTrainedModel,
FlaxCLIPVisionModel,
FlaxCLIPVisionPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
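# How the lazy-module pattern behaves (added note): importing a symbol from
# the package resolves it on first access, so the heavy torch/tf/flax branches
# load only when actually touched.
#
#   >>> from transformers.models.clip import CLIPTextConfig  # config only, no torch needed
#   >>> from transformers.models.clip import CLIPModel       # triggers the torch branch  # doctest: +SKIP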
| 449 |
"""simple docstring"""
from ..utils import DummyObject, requires_backends
class TFGPT2Tokenizer(metaclass=DummyObject):
    _backends = ["keras_nlp"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["keras_nlp"])
| 449 | 1 |
class Node:
    def __init__(self, name, val):
        self.name = name
        self.val = val

    def __str__(self):
        return f"{self.__class__.__name__}({self.name}, {self.val})"

    def __lt__(self, other):
        return self.val < other.val


class MinHeap:
    """Min-heap of Nodes keyed on ``val``; keeps a node -> index map so that
    decrease_key works in O(log n)."""

    def __init__(self, array):
        self.idx_of_element = {}
        self.heap_dict = {}
        self.heap = self.build_heap(array)

    def __getitem__(self, key):
        return self.get_value(key)

    def get_parent_idx(self, idx):
        return (idx - 1) // 2

    def get_left_child_idx(self, idx):
        return idx * 2 + 1

    def get_right_child_idx(self, idx):
        return idx * 2 + 2

    def get_value(self, key):
        return self.heap_dict[key]

    def build_heap(self, array):
        last_idx = len(array) - 1
        start_from = self.get_parent_idx(last_idx)
        for idx, i in enumerate(array):
            self.idx_of_element[i] = idx
            self.heap_dict[i.name] = i.val
        for i in range(start_from, -1, -1):
            self.sift_down(i, array)
        return array

    # this is the min-heapify step
    def sift_down(self, idx, array):
        while True:
            l = self.get_left_child_idx(idx)  # noqa: E741
            r = self.get_right_child_idx(idx)

            smallest = idx
            if l < len(array) and array[l] < array[idx]:
                smallest = l
            if r < len(array) and array[r] < array[smallest]:
                smallest = r

            if smallest != idx:
                array[idx], array[smallest] = array[smallest], array[idx]
                (
                    self.idx_of_element[array[idx]],
                    self.idx_of_element[array[smallest]],
                ) = (
                    self.idx_of_element[array[smallest]],
                    self.idx_of_element[array[idx]],
                )
                idx = smallest
            else:
                break

    def sift_up(self, idx):
        p = self.get_parent_idx(idx)
        while p >= 0 and self.heap[p] > self.heap[idx]:
            self.heap[p], self.heap[idx] = self.heap[idx], self.heap[p]
            self.idx_of_element[self.heap[p]], self.idx_of_element[self.heap[idx]] = (
                self.idx_of_element[self.heap[idx]],
                self.idx_of_element[self.heap[p]],
            )
            idx = p
            p = self.get_parent_idx(idx)

    def peek(self):
        return self.heap[0]

    def remove(self):
        self.heap[0], self.heap[-1] = self.heap[-1], self.heap[0]
        self.idx_of_element[self.heap[0]], self.idx_of_element[self.heap[-1]] = (
            self.idx_of_element[self.heap[-1]],
            self.idx_of_element[self.heap[0]],
        )

        x = self.heap.pop()
        del self.idx_of_element[x]
        self.sift_down(0, self.heap)
        return x

    def insert(self, node):
        self.heap.append(node)
        self.idx_of_element[node] = len(self.heap) - 1
        self.heap_dict[node.name] = node.val
        self.sift_up(len(self.heap) - 1)

    def is_empty(self):
        return len(self.heap) == 0

    def decrease_key(self, node, new_value):
        assert (
            self.heap[self.idx_of_element[node]].val > new_value
        ), "newValue must be less that current value"
        node.val = new_value
        self.heap_dict[node.name] = new_value
        self.sift_up(self.idx_of_element[node])


r = Node("R", -1)
b = Node("B", 6)
a = Node("A", 3)
x = Node("X", 1)
e = Node("E", 4)

# Use one of these two ways to generate Min-Heap

# Generating Min-Heap from array
my_min_heap = MinHeap([r, b, a, x, e])

# Generating Min-Heap by Insert method
# my_min_heap.insert(a)
# my_min_heap.insert(b)
# my_min_heap.insert(x)
# my_min_heap.insert(r)
# my_min_heap.insert(e)

# Before
print("Min Heap - before decrease key")
for i in my_min_heap.heap:
    print(i)

print("Min Heap - After decrease key of node [B -> -17]")
my_min_heap.decrease_key(b, -17)

# After
for i in my_min_heap.heap:
    print(i)
if __name__ == "__main__":
import doctest
doctest.testmod()
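# Extract-min usage sketch (added): remove() pops the smallest node and
# re-heapifies; peek() inspects the root without removing it.
#
#   >>> heap = MinHeap([Node("A", 3), Node("B", 1), Node("C", 2)])
#   >>> str(heap.peek())
#   'Node(B, 1)'
#   >>> str(heap.remove())
#   'Node(B, 1)'
#   >>> str(heap.peek())
#   'Node(C, 2)'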
| 707 |
import argparse
import torch
from safetensors.torch import load_file
from diffusers import StableDiffusionPipeline
def convert(base_model_path, checkpoint_path, LORA_PREFIX_UNET, LORA_PREFIX_TEXT_ENCODER, alpha):
    # load base model
    pipeline = StableDiffusionPipeline.from_pretrained(base_model_path, torch_dtype=torch.float32)

    # load LoRA weight from .safetensors
    state_dict = load_file(checkpoint_path)

    visited = []

    # directly update weight in diffusers model
    for key in state_dict:
        # it is suggested to print out the key, it usually will be something like below
        # "lora_te_text_model_encoder_layers_0_self_attn_k_proj.lora_down.weight"

        # as we have set the alpha beforehand, so just skip
        if ".alpha" in key or key in visited:
            continue

        if "text" in key:
            layer_infos = key.split(".")[0].split(LORA_PREFIX_TEXT_ENCODER + "_")[-1].split("_")
            curr_layer = pipeline.text_encoder
        else:
            layer_infos = key.split(".")[0].split(LORA_PREFIX_UNET + "_")[-1].split("_")
            curr_layer = pipeline.unet

        # find the target layer
        temp_name = layer_infos.pop(0)
        while len(layer_infos) > -1:
            try:
                curr_layer = curr_layer.__getattr__(temp_name)
                if len(layer_infos) > 0:
                    temp_name = layer_infos.pop(0)
                elif len(layer_infos) == 0:
                    break
            except Exception:
                if len(temp_name) > 0:
                    temp_name += "_" + layer_infos.pop(0)
                else:
                    temp_name = layer_infos.pop(0)

        pair_keys = []
        if "lora_down" in key:
            pair_keys.append(key.replace("lora_down", "lora_up"))
            pair_keys.append(key)
        else:
            pair_keys.append(key)
            pair_keys.append(key.replace("lora_up", "lora_down"))

        # update weight
        if len(state_dict[pair_keys[0]].shape) == 4:
            weight_up = state_dict[pair_keys[0]].squeeze(3).squeeze(2).to(torch.float32)
            weight_down = state_dict[pair_keys[1]].squeeze(3).squeeze(2).to(torch.float32)
            curr_layer.weight.data += alpha * torch.mm(weight_up, weight_down).unsqueeze(2).unsqueeze(3)
        else:
            weight_up = state_dict[pair_keys[0]].to(torch.float32)
            weight_down = state_dict[pair_keys[1]].to(torch.float32)
            curr_layer.weight.data += alpha * torch.mm(weight_up, weight_down)

        # update visited list
        for item in pair_keys:
            visited.append(item)

    return pipeline
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
"""--base_model_path""", default=None, type=str, required=True, help="""Path to the base model in diffusers format."""
)
parser.add_argument(
"""--checkpoint_path""", default=None, type=str, required=True, help="""Path to the checkpoint to convert."""
)
parser.add_argument("""--dump_path""", default=None, type=str, required=True, help="""Path to the output model.""")
parser.add_argument(
"""--lora_prefix_unet""", default="""lora_unet""", type=str, help="""The prefix of UNet weight in safetensors"""
)
parser.add_argument(
"""--lora_prefix_text_encoder""",
default="""lora_te""",
type=str,
help="""The prefix of text encoder weight in safetensors""",
)
parser.add_argument("""--alpha""", default=0.75, type=float, help="""The merging ratio in W = W0 + alpha * deltaW""")
parser.add_argument(
"""--to_safetensors""", action="""store_true""", help="""Whether to store pipeline in safetensors format or not."""
)
parser.add_argument("""--device""", type=str, help="""Device to use (e.g. cpu, cuda:0, cuda:1, etc.)""")
    args = parser.parse_args()

    base_model_path = args.base_model_path
    checkpoint_path = args.checkpoint_path
    dump_path = args.dump_path
    lora_prefix_unet = args.lora_prefix_unet
    lora_prefix_text_encoder = args.lora_prefix_text_encoder
    alpha = args.alpha

    pipe = convert(base_model_path, checkpoint_path, lora_prefix_unet, lora_prefix_text_encoder, alpha)
    pipe = pipe.to(args.device)
pipe.save_pretrained(args.dump_path, safe_serialization=args.to_safetensors)
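# CLI usage sketch (added; paths and alpha value are hypothetical examples):
#
#   python convert_lora_safetensor_to_diffusers.py \
#       --base_model_path runwayml/stable-diffusion-v1-5 \
#       --checkpoint_path my_lora.safetensors \
#       --dump_path merged_model \
#       --alpha 0.75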
| 131 | 0 |
import argparse
import os
import re
import torch
from flax.traverse_util import flatten_dict
from t5x import checkpoints
from transformers import (
    AutoTokenizer,
    Pix2StructConfig,
    Pix2StructForConditionalGeneration,
    Pix2StructImageProcessor,
    Pix2StructProcessor,
    Pix2StructTextConfig,
    Pix2StructVisionConfig,
)
def get_flax_param(t5x_checkpoint_path):
    flax_params = checkpoints.load_t5x_checkpoint(t5x_checkpoint_path)
    flax_params = flatten_dict(flax_params)
    return flax_params
def rename_and_convert_flax_params(flax_dict):
    converted_dict = {}

    CONVERSION_MAPPING = {
"""token_embedder""": """embeddings""",
"""encoder_norm""": """layernorm""",
"""kernel""": """weight""",
""".out""": """.output""",
"""scale""": """weight""",
"""embedders_0.pos_embedding""": """row_embedder.weight""",
"""embedders_1.pos_embedding""": """column_embedder.weight""",
}
    DECODER_CONVERSION_MAPPING = {
"""query""": """attention.query""",
"""key""": """attention.key""",
"""value""": """attention.value""",
"""output.dense""": """output""",
"""encoder_decoder_attention.o""": """encoder_decoder_attention.attention.o""",
"""pre_self_attention_layer_norm""": """self_attention.layer_norm""",
"""pre_cross_attention_layer_norm""": """encoder_decoder_attention.layer_norm""",
"""mlp.""": """mlp.DenseReluDense.""",
"""pre_mlp_layer_norm""": """mlp.layer_norm""",
"""self_attention.o""": """self_attention.attention.o""",
"""decoder.embeddings.embedding""": """decoder.embed_tokens.weight""",
"""decoder.relpos_bias.rel_embedding""": """decoder.layer.0.self_attention.attention.relative_attention_bias.weight""",
"""decoder.decoder_norm.weight""": """decoder.final_layer_norm.weight""",
"""decoder.logits_dense.weight""": """decoder.lm_head.weight""",
}
    for key in flax_dict.keys():
        if "target" in key:
            # remove the first prefix from the key
            new_key = ".".join(key[1:])

            # rename the key
            for old, new in CONVERSION_MAPPING.items():
                new_key = new_key.replace(old, new)

            if "decoder" in new_key:
                for old, new in DECODER_CONVERSION_MAPPING.items():
                    new_key = new_key.replace(old, new)

            if "layers" in new_key and "decoder" not in new_key:
                # use regex to replace the layer number
                new_key = re.sub(r"layers_(\d+)", r"layer.\1", new_key)
                new_key = new_key.replace("encoder", "encoder.encoder")
            elif "layers" in new_key and "decoder" in new_key:
                # use regex to replace the layer number
                new_key = re.sub(r"layers_(\d+)", r"layer.\1", new_key)

            converted_dict[new_key] = flax_dict[key]

    converted_torch_dict = {}
    # convert converted_dict into torch format
    for key in converted_dict.keys():
        if ("embed_tokens" not in key) and ("embedder" not in key):
            converted_torch_dict[key] = torch.from_numpy(converted_dict[key].T)
        else:
            converted_torch_dict[key] = torch.from_numpy(converted_dict[key])

    return converted_torch_dict
def convert_pix2struct_original_pytorch_checkpoint_to_hf(
    t5x_checkpoint_path, pytorch_dump_folder_path, use_large=False, is_vqa=False
):
    flax_params = get_flax_param(t5x_checkpoint_path)

    if not use_large:
        encoder_config = Pix2StructVisionConfig()
        decoder_config = Pix2StructTextConfig()
    else:
        encoder_config = Pix2StructVisionConfig(
            hidden_size=1536, d_ff=3968, num_attention_heads=24, num_hidden_layers=18
        )
        decoder_config = Pix2StructTextConfig(hidden_size=1536, d_ff=3968, num_heads=24, num_layers=18)
    config = Pix2StructConfig(
        vision_config=encoder_config.to_dict(), text_config=decoder_config.to_dict(), is_vqa=is_vqa
    )

    model = Pix2StructForConditionalGeneration(config)

    torch_params = rename_and_convert_flax_params(flax_params)
    model.load_state_dict(torch_params)

    tokenizer = AutoTokenizer.from_pretrained("ybelkada/test-pix2struct-tokenizer")
    image_processor = Pix2StructImageProcessor()
    processor = Pix2StructProcessor(image_processor=image_processor, tokenizer=tokenizer)

    if use_large:
        # assignment targets reconstructed; the names were lost in extraction
        processor.image_processor.max_patches = 4096
        processor.image_processor.is_vqa = True

    # mkdir if needed
    os.makedirs(pytorch_dump_folder_path, exist_ok=True)

    model.save_pretrained(pytorch_dump_folder_path)
    processor.save_pretrained(pytorch_dump_folder_path)

    print("Model saved in {}".format(pytorch_dump_folder_path))
if __name__ == "__main__":
__UpperCamelCase : List[str] = argparse.ArgumentParser()
parser.add_argument("""--t5x_checkpoint_path""", default=None, type=str, help="""Path to the original T5x checkpoint.""")
parser.add_argument("""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""")
parser.add_argument("""--use_large""", action="""store_true""", help="""Use large model.""")
parser.add_argument("""--is_vqa""", action="""store_true""", help="""Use large model.""")
__UpperCamelCase : Tuple = parser.parse_args()
convert_pixastruct_original_pytorch_checkpoint_to_hf(
args.tax_checkpoint_path, args.pytorch_dump_folder_path, args.use_large
)
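# CLI usage sketch (added; the checkpoint path is a hypothetical example):
#
#   python convert_pix2struct_checkpoint_to_pytorch.py \
#       --t5x_checkpoint_path gs://pix2struct-data/pix2struct_base \
#       --pytorch_dump_folder_path ./pix2struct-base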
| 80 |
import json
import os
import re
import unittest
from transformers import CodeGenTokenizer, CodeGenTokenizerFast
from transformers.models.codegen.tokenization_codegen import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class CodeGenTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = CodeGenTokenizer
    rust_tokenizer_class = CodeGenTokenizerFast
    test_rust_tokenizer = True
    from_pretrained_kwargs = {"add_prefix_space": True}
    test_seq2seq = False
    def setUp(self):
        super().setUp()

        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = [
            "l",
            "o",
            "w",
            "e",
            "r",
            "s",
            "t",
            "i",
            "d",
            "n",
            "\u0120",
            "\u0120l",
            "\u0120n",
            "\u0120lo",
            "\u0120low",
            "er",
            "\u0120lowest",
            "\u0120newer",
            "\u0120wider",
            "<unk>",
            "<|endoftext|>",
        ]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""]
        self.special_tokens_map = {"unk_token": "<unk>"}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))
    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return CodeGenTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return CodeGenTokenizerFast.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = "lower newer"
        output_text = "lower newer"
        return input_text, output_text
    def test_full_tokenizer(self):
        tokenizer = CodeGenTokenizer(self.vocab_file, self.merges_file, **self.special_tokens_map)
        text = "lower newer"
        bpe_tokens = ["\u0120low", "er", "\u0120", "n", "e", "w", "er"]
        tokens = tokenizer.tokenize(text, add_prefix_space=True)
        self.assertListEqual(tokens, bpe_tokens)

        input_tokens = tokens + [tokenizer.unk_token]
        input_bpe_tokens = [14, 15, 10, 9, 3, 2, 15, 19]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)
    def test_rust_and_python_full_tokenizers(self):
        if not self.test_rust_tokenizer:
            return

        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer(add_prefix_space=True)

        sequence = "lower newer"

        # Testing tokenization
        tokens = tokenizer.tokenize(sequence, add_prefix_space=True)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)

        # Testing conversion to ids without special tokens
        ids = tokenizer.encode(sequence, add_special_tokens=False, add_prefix_space=True)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)

        # Testing conversion to ids with special tokens
        rust_tokenizer = self.get_rust_tokenizer(add_prefix_space=True)
        ids = tokenizer.encode(sequence, add_prefix_space=True)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)

        # Testing the unknown token
        input_tokens = tokens + [rust_tokenizer.unk_token]
        input_bpe_tokens = [14, 15, 10, 9, 3, 2, 15, 19]
        self.assertListEqual(rust_tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)
    def test_pretokenized_inputs(self, *args, **kwargs):
        # It's very difficult to mix/test pretokenization with byte-level
        # And get both CodeGen and Roberta to work at the same time (mostly an issue of adding a space before the string)
        pass
    def test_padding(self, max_length=15):
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)

                # Simple input
                s = "This is a simple input"
                s2 = ["This is a simple input 1", "This is a simple input 2"]
                p = ("This is a simple input", "This is a pair")
                p2 = [
                    ("This is a simple input 1", "This is a simple input 2"),
                    ("This is a simple pair 1", "This is a simple pair 2"),
                ]

                # Simple input tests
                self.assertRaises(ValueError, tokenizer_r.encode, s, max_length=max_length, padding="max_length")

                # Simple input
                self.assertRaises(ValueError, tokenizer_r.encode_plus, s, max_length=max_length, padding="max_length")

                # Simple input
                self.assertRaises(
                    ValueError, tokenizer_r.batch_encode_plus, s2, max_length=max_length, padding="max_length"
                )

                # Pair input
                self.assertRaises(ValueError, tokenizer_r.encode, p, max_length=max_length, padding="max_length")

                # Pair input
                self.assertRaises(ValueError, tokenizer_r.encode_plus, p, max_length=max_length, padding="max_length")

                # Pair input
                self.assertRaises(
                    ValueError, tokenizer_r.batch_encode_plus, p2, max_length=max_length, padding="max_length"
                )
    def test_padding_if_pad_token_set_slow(self):
        tokenizer = CodeGenTokenizer.from_pretrained(self.tmpdirname, pad_token="<pad>")

        # Simple input
        s = "This is a simple input"
        s2 = ["This is a simple input looooooooong", "This is a simple input"]
        p = ("This is a simple input", "This is a pair")
        p2 = [
            ("This is a simple input loooooong", "This is a simple input"),
            ("This is a simple pair loooooong", "This is a simple pair"),
        ]

        pad_token_id = tokenizer.pad_token_id

        out_s = tokenizer(s, padding="max_length", max_length=30, return_tensors="np")
        out_s2 = tokenizer(s2, padding=True, truncate=True, return_tensors="np")
        out_p = tokenizer(*p, padding="max_length", max_length=60, return_tensors="np")
        out_p2 = tokenizer(p2, padding=True, truncate=True, return_tensors="np")

        # s
        # test single string max_length padding
        self.assertEqual(out_s["input_ids"].shape[-1], 30)
        self.assertTrue(pad_token_id in out_s["input_ids"])
        self.assertTrue(0 in out_s["attention_mask"])

        # s2
        # test automatic padding
        self.assertEqual(out_s2["input_ids"].shape[-1], 33)
        # long slice doesn't have padding
        self.assertFalse(pad_token_id in out_s2["input_ids"][0])
        self.assertFalse(0 in out_s2["attention_mask"][0])
        # short slice does have padding
        self.assertTrue(pad_token_id in out_s2["input_ids"][1])
        self.assertTrue(0 in out_s2["attention_mask"][1])

        # p
        # test single pair max_length padding
        self.assertEqual(out_p["input_ids"].shape[-1], 60)
        self.assertTrue(pad_token_id in out_p["input_ids"])
        self.assertTrue(0 in out_p["attention_mask"])

        # p2
        # test automatic padding pair
        self.assertEqual(out_p2["input_ids"].shape[-1], 52)
        # long slice pair doesn't have padding
        self.assertFalse(pad_token_id in out_p2["input_ids"][0])
        self.assertFalse(0 in out_p2["attention_mask"][0])
        # short slice pair does have padding
        self.assertTrue(pad_token_id in out_p2["input_ids"][1])
        self.assertTrue(0 in out_p2["attention_mask"][1])
    def test_add_bos_token_slow(self):
        bos_token = "$$$"
        tokenizer = CodeGenTokenizer.from_pretrained(self.tmpdirname, bos_token=bos_token, add_bos_token=True)

        s = "This is a simple input"
        s2 = ["This is a simple input 1", "This is a simple input 2"]

        bos_token_id = tokenizer.bos_token_id

        out_s = tokenizer(s)
        out_s2 = tokenizer(s2)

        self.assertEqual(out_s.input_ids[0], bos_token_id)
        self.assertTrue(all(o[0] == bos_token_id for o in out_s2.input_ids))

        decode_s = tokenizer.decode(out_s.input_ids)
        decode_s2 = tokenizer.batch_decode(out_s2.input_ids)

        self.assertEqual(decode_s.split()[0], bos_token)
        self.assertTrue(all(d.split()[0] == bos_token for d in decode_s2))
    @slow
    def test_truncation(self):
        tokenizer = CodeGenTokenizer.from_pretrained("Salesforce/codegen-350M-mono")

        text = "\nif len_a > len_b:\n result = a\nelse:\n result = b\n\n\n\n#"
        expected_truncated_text = "\nif len_a > len_b: result = a\nelse: result = b"

        input_ids = tokenizer.encode(text)
        truncate_before_pattern = ["^#", re.escape("<|endoftext|>"), "^'''", '^"""', "\n\n\n"]
        decoded_text = tokenizer.decode(input_ids, truncate_before_pattern=truncate_before_pattern)

        self.assertEqual(decoded_text, expected_truncated_text)
    # tokenizer has no padding token
    def test_padding_different_model_input_name(self):
        pass
| 600 | 0 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

RWKV_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"RWKV/rwkv-4-169m-pile": "https://huggingface.co/RWKV/rwkv-4-169m-pile/resolve/main/config.json",
"RWKV/rwkv-4-430m-pile": "https://huggingface.co/RWKV/rwkv-4-430m-pile/resolve/main/config.json",
"RWKV/rwkv-4-1b5-pile": "https://huggingface.co/RWKV/rwkv-4-1b5-pile/resolve/main/config.json",
"RWKV/rwkv-4-3b-pile": "https://huggingface.co/RWKV/rwkv-4-3b-pile/resolve/main/config.json",
"RWKV/rwkv-4-7b-pile": "https://huggingface.co/RWKV/rwkv-4-7b-pile/resolve/main/config.json",
"RWKV/rwkv-4-14b-pile": "https://huggingface.co/RWKV/rwkv-4-14b-pile/resolve/main/config.json",
"RWKV/rwkv-raven-1b5": "https://huggingface.co/RWKV/rwkv-raven-1b5/resolve/main/config.json",
"RWKV/rwkv-raven-3b": "https://huggingface.co/RWKV/rwkv-raven-3b/resolve/main/config.json",
"RWKV/rwkv-raven-7b": "https://huggingface.co/RWKV/rwkv-raven-7b/resolve/main/config.json",
"RWKV/rwkv-raven-14b": "https://huggingface.co/RWKV/rwkv-raven-14b/resolve/main/config.json",
}
class RwkvConfig(PretrainedConfig):
    model_type = "rwkv"
    attribute_map = {"max_position_embeddings": "context_length"}

    def __init__(
        self,
        vocab_size=50277,
        context_length=1024,
        hidden_size=4096,
        num_hidden_layers=32,
        attention_hidden_size=None,
        intermediate_size=None,
        layer_norm_epsilon=1e-5,
        bos_token_id=0,
        eos_token_id=0,
        rescale_every=6,
        tie_word_embeddings=False,
        use_cache=True,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.context_length = context_length
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.attention_hidden_size = attention_hidden_size if attention_hidden_size is not None else hidden_size
        self.intermediate_size = intermediate_size if intermediate_size is not None else 4 * hidden_size
        self.layer_norm_epsilon = layer_norm_epsilon
        self.rescale_every = rescale_every
        self.use_cache = use_cache

        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id

        super().__init__(
            tie_word_embeddings=tie_word_embeddings, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs
        )
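# Usage sketch (added for illustration): unset sizes fall back to derived
# defaults, mirroring the published checkpoints.
#
#   >>> config = RwkvConfig(context_length=2048, hidden_size=2048, num_hidden_layers=24)
#   >>> config.attention_hidden_size  # falls back to hidden_size when unset
#   2048
#   >>> config.intermediate_size      # falls back to 4 * hidden_size when unset
#   8192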
| 644 |
'''simple docstring'''
def twos_complement(number: int) -> str:
    """Take in a negative integer and return its two's complement
    representation as a binary string prefixed with '0b'."""
    if number > 0:
        raise ValueError("input must be a negative integer")
    binary_number_length = len(bin(number)[3:])
    twos_complement_number = bin(abs(number) - (1 << binary_number_length))[3:]
    twos_complement_number = (
        (
            "1"
            + "0" * (binary_number_length - len(twos_complement_number))
            + twos_complement_number
        )
        if number < 0
        else "0"
    )
    return "0b" + twos_complement_number
if __name__ == "__main__":
import doctest
doctest.testmod()
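# Worked example (added): bin(-5) has 3 magnitude bits ('101'), and
# abs(-5) - (1 << 3) = -3, whose bits '11' get left-padded with the sign bit:
#
#   >>> twos_complement(-5)
#   '0b1011'
#   >>> twos_complement(0)
#   '0b0'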
| 644 | 1 |
"""simple docstring"""
import collections
from typing import List, Optional, Union
from ...tokenization_utils_base import BatchEncoding
from ...utils import TensorType, add_end_docstrings, add_start_docstrings, logging
from ..bert.tokenization_bert import BertTokenizer
logger = logging.get_logger(__name__)
_lowerCAmelCase : Tuple = {"""vocab_file""": """vocab.txt""", """tokenizer_file""": """tokenizer.json"""}
CONTEXT_ENCODER_PRETRAINED_VOCAB_FILES_MAP = {
"""vocab_file""": {
"""facebook/dpr-ctx_encoder-single-nq-base""": (
"""https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/vocab.txt"""
),
"""facebook/dpr-ctx_encoder-multiset-base""": (
"""https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/vocab.txt"""
),
},
"""tokenizer_file""": {
"""facebook/dpr-ctx_encoder-single-nq-base""": (
"""https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/tokenizer.json"""
),
"""facebook/dpr-ctx_encoder-multiset-base""": (
"""https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/tokenizer.json"""
),
},
}
QUESTION_ENCODER_PRETRAINED_VOCAB_FILES_MAP = {
"""vocab_file""": {
"""facebook/dpr-question_encoder-single-nq-base""": (
"""https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/vocab.txt"""
),
"""facebook/dpr-question_encoder-multiset-base""": (
"""https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/vocab.txt"""
),
},
"""tokenizer_file""": {
"""facebook/dpr-question_encoder-single-nq-base""": (
"""https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/tokenizer.json"""
),
"""facebook/dpr-question_encoder-multiset-base""": (
"""https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/tokenizer.json"""
),
},
}
READER_PRETRAINED_VOCAB_FILES_MAP = {
"""vocab_file""": {
"""facebook/dpr-reader-single-nq-base""": (
"""https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/vocab.txt"""
),
"""facebook/dpr-reader-multiset-base""": (
"""https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/vocab.txt"""
),
},
"""tokenizer_file""": {
"""facebook/dpr-reader-single-nq-base""": (
"""https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/tokenizer.json"""
),
"""facebook/dpr-reader-multiset-base""": (
"""https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/tokenizer.json"""
),
},
}
CONTEXT_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"""facebook/dpr-ctx_encoder-single-nq-base""": 512,
"""facebook/dpr-ctx_encoder-multiset-base""": 512,
}
QUESTION_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"""facebook/dpr-question_encoder-single-nq-base""": 512,
"""facebook/dpr-question_encoder-multiset-base""": 512,
}
READER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"""facebook/dpr-reader-single-nq-base""": 512,
"""facebook/dpr-reader-multiset-base""": 512,
}
CONTEXT_ENCODER_PRETRAINED_INIT_CONFIGURATION = {
"""facebook/dpr-ctx_encoder-single-nq-base""": {"""do_lower_case""": True},
"""facebook/dpr-ctx_encoder-multiset-base""": {"""do_lower_case""": True},
}
QUESTION_ENCODER_PRETRAINED_INIT_CONFIGURATION = {
"""facebook/dpr-question_encoder-single-nq-base""": {"""do_lower_case""": True},
"""facebook/dpr-question_encoder-multiset-base""": {"""do_lower_case""": True},
}
READER_PRETRAINED_INIT_CONFIGURATION = {
"""facebook/dpr-reader-single-nq-base""": {"""do_lower_case""": True},
"""facebook/dpr-reader-multiset-base""": {"""do_lower_case""": True},
}
class DPRContextEncoderTokenizer(BertTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = CONTEXT_ENCODER_PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = CONTEXT_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = CONTEXT_ENCODER_PRETRAINED_INIT_CONFIGURATION
class DPRQuestionEncoderTokenizer(BertTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = QUESTION_ENCODER_PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = QUESTION_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = QUESTION_ENCODER_PRETRAINED_INIT_CONFIGURATION
DPRSpanPrediction = collections.namedtuple(
    "DPRSpanPrediction", ["span_score", "relevance_score", "doc_id", "start_index", "end_index", "text"]
)

DPRReaderOutput = collections.namedtuple("DPRReaderOutput", ["start_logits", "end_logits", "relevance_logits"])
_lowerCAmelCase : Union[str, Any] = r"""
Return a dictionary with the token ids of the input strings and other information to give to `.decode_best_spans`.
It converts the strings of a question and different passages (title and text) in a sequence of IDs (integers),
using the tokenizer and vocabulary. The resulting `input_ids` is a matrix of size `(n_passages, sequence_length)`
with the format:
```
[CLS] <question token ids> [SEP] <titles ids> [SEP] <texts ids>
```
Args:
questions (`str` or `List[str]`):
The questions to be encoded. You can specify one question for many passages. In this case, the question
will be duplicated like `[questions] * n_passages`. Otherwise you have to specify as many questions as in
`titles` or `texts`.
titles (`str` or `List[str]`):
The passages titles to be encoded. This can be a string or a list of strings if there are several passages.
texts (`str` or `List[str]`):
The passages texts to be encoded. This can be a string or a list of strings if there are several passages.
padding (`bool`, `str` or [`~utils.PaddingStrategy`], *optional*, defaults to `False`):
Activates and controls padding. Accepts the following values:
- `True` or `'longest'`: Pad to the longest sequence in the batch (or no padding if only a single sequence
if provided).
- `'max_length'`: Pad to a maximum length specified with the argument `max_length` or to the maximum
acceptable input length for the model if that argument is not provided.
- `False` or `'do_not_pad'` (default): No padding (i.e., can output a batch with sequences of different
lengths).
truncation (`bool`, `str` or [`~tokenization_utils_base.TruncationStrategy`], *optional*, defaults to `False`):
Activates and controls truncation. Accepts the following values:
- `True` or `'longest_first'`: Truncate to a maximum length specified with the argument `max_length` or to
the maximum acceptable input length for the model if that argument is not provided. This will truncate
token by token, removing a token from the longest sequence in the pair if a pair of sequences (or a batch
of pairs) is provided.
- `'only_first'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum
acceptable input length for the model if that argument is not provided. This will only truncate the first
sequence of a pair if a pair of sequences (or a batch of pairs) is provided.
- `'only_second'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum
acceptable input length for the model if that argument is not provided. This will only truncate the
second sequence of a pair if a pair of sequences (or a batch of pairs) is provided.
- `False` or `'do_not_truncate'` (default): No truncation (i.e., can output batch with sequence lengths
greater than the model maximum admissible input size).
max_length (`int`, *optional*):
Controls the maximum length to use by one of the truncation/padding parameters.
If left unset or set to `None`, this will use the predefined model maximum length if a maximum length
is required by one of the truncation/padding parameters. If the model has no specific maximum input
length (like XLNet) truncation/padding to a maximum length will be deactivated.
return_tensors (`str` or [`~utils.TensorType`], *optional*):
If set, will return tensors instead of list of python integers. Acceptable values are:
- `'tf'`: Return TensorFlow `tf.constant` objects.
- `'pt'`: Return PyTorch `torch.Tensor` objects.
- `'np'`: Return Numpy `np.ndarray` objects.
return_attention_mask (`bool`, *optional*):
Whether or not to return the attention mask. If not set, will return the attention mask according to the
specific tokenizer's default, defined by the `return_outputs` attribute.
[What are attention masks?](../glossary#attention-mask)
Returns:
`Dict[str, List[List[int]]]`: A dictionary with the following keys:
- `input_ids`: List of token ids to be fed to a model.
- `attention_mask`: List of indices specifying which tokens should be attended to by the model.
"""
@add_start_docstrings(CUSTOM_DPR_READER_DOCSTRING)
class CustomDPRReaderTokenizerMixin:
    def __call__(
        self,
        questions,
        titles: Optional[str] = None,
        texts: Optional[str] = None,
        padding: Union[bool, str] = False,
        truncation: Union[bool, str] = False,
        max_length: Optional[int] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        return_attention_mask: Optional[bool] = None,
        **kwargs,
    ) -> BatchEncoding:
        if titles is None and texts is None:
            return super().__call__(
                questions,
                padding=padding,
                truncation=truncation,
                max_length=max_length,
                return_tensors=return_tensors,
                return_attention_mask=return_attention_mask,
                **kwargs,
            )
        elif titles is None or texts is None:
            text_pair = titles if texts is None else texts
            return super().__call__(
                questions,
                text_pair,
                padding=padding,
                truncation=truncation,
                max_length=max_length,
                return_tensors=return_tensors,
                return_attention_mask=return_attention_mask,
                **kwargs,
            )
        titles = titles if not isinstance(titles, str) else [titles]
        texts = texts if not isinstance(texts, str) else [texts]
        n_passages = len(titles)
        questions = questions if not isinstance(questions, str) else [questions] * n_passages
        if len(titles) != len(texts):
            raise ValueError(
                f"There should be as many titles than texts but got {len(titles)} titles and {len(texts)} texts."
            )
        encoded_question_and_titles = super().__call__(questions, titles, padding=False, truncation=False)["input_ids"]
        encoded_texts = super().__call__(texts, add_special_tokens=False, padding=False, truncation=False)["input_ids"]
        encoded_inputs = {
            "input_ids": [
                (encoded_question_and_title + encoded_text)[:max_length]
                if max_length is not None and truncation
                else encoded_question_and_title + encoded_text
                for encoded_question_and_title, encoded_text in zip(encoded_question_and_titles, encoded_texts)
            ]
        }
        if return_attention_mask is not False:
            attention_mask = []
            for input_ids in encoded_inputs["input_ids"]:
                attention_mask.append([int(input_id != self.pad_token_id) for input_id in input_ids])
            encoded_inputs["attention_mask"] = attention_mask
        return self.pad(encoded_inputs, padding=padding, max_length=max_length, return_tensors=return_tensors)
    def decode_best_spans(
        self,
        reader_input: BatchEncoding,
        reader_output: DPRReaderOutput,
        num_spans: int = 16,
        max_answer_length: int = 64,
        num_spans_per_passage: int = 4,
    ) -> List[DPRSpanPrediction]:
        input_ids = reader_input["input_ids"]
        start_logits, end_logits, relevance_logits = reader_output[:3]
        n_passages = len(relevance_logits)
        sorted_docs = sorted(range(n_passages), reverse=True, key=relevance_logits.__getitem__)
        nbest_spans_predictions: List[DPRReaderOutput] = []
        for doc_id in sorted_docs:
            sequence_ids = list(input_ids[doc_id])
            # assuming question & title information is at the beginning of the sequence
            passage_offset = sequence_ids.index(self.sep_token_id, 2) + 1  # second sep id
            if sequence_ids[-1] == self.pad_token_id:
                sequence_len = sequence_ids.index(self.pad_token_id)
            else:
                sequence_len = len(sequence_ids)

            best_spans = self._get_best_spans(
                start_logits=start_logits[doc_id][passage_offset:sequence_len],
                end_logits=end_logits[doc_id][passage_offset:sequence_len],
                max_answer_length=max_answer_length,
                top_spans=num_spans_per_passage,
            )
            for start_index, end_index in best_spans:
                start_index += passage_offset
                end_index += passage_offset
                nbest_spans_predictions.append(
                    DPRSpanPrediction(
                        span_score=start_logits[doc_id][start_index] + end_logits[doc_id][end_index],
                        relevance_score=relevance_logits[doc_id],
                        doc_id=doc_id,
                        start_index=start_index,
                        end_index=end_index,
                        text=self.decode(sequence_ids[start_index : end_index + 1]),
                    )
                )
            if len(nbest_spans_predictions) >= num_spans:
                break
        return nbest_spans_predictions[:num_spans]
    def _get_best_spans(
        self,
        start_logits: List[int],
        end_logits: List[int],
        max_answer_length: int,
        top_spans: int,
    ) -> List[DPRSpanPrediction]:
        scores = []
        for start_index, start_score in enumerate(start_logits):
            for answer_length, end_score in enumerate(end_logits[start_index : start_index + max_answer_length]):
                scores.append(((start_index, start_index + answer_length), start_score + end_score))
        scores = sorted(scores, key=lambda x: x[1], reverse=True)
        chosen_span_intervals = []
        for (start_index, end_index), score in scores:
            if start_index > end_index:
                raise ValueError(f"Wrong span indices: [{start_index}:{end_index}]")
            length = end_index - start_index + 1
            if length > max_answer_length:
                raise ValueError(f"Span is too long: {length} > {max_answer_length}")
            if any(
                start_index <= prev_start_index <= prev_end_index <= end_index
                or prev_start_index <= start_index <= end_index <= prev_end_index
                for (prev_start_index, prev_end_index) in chosen_span_intervals
            ):
                continue
            chosen_span_intervals.append((start_index, end_index))

            if len(chosen_span_intervals) == top_spans:
                break
        return chosen_span_intervals
@add_end_docstrings(CUSTOM_DPR_READER_DOCSTRING)
class DPRReaderTokenizer(CustomDPRReaderTokenizerMixin, BertTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = READER_PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = READER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = READER_PRETRAINED_INIT_CONFIGURATION
    model_input_names = ["input_ids", "attention_mask"]
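# Usage sketch (added; the texts are illustrative): encoding one question
# against two passages yields one input row per passage, in the
# [CLS] question [SEP] title [SEP] text format described in the docstring.
#
#   >>> tokenizer = DPRReaderTokenizer.from_pretrained("facebook/dpr-reader-single-nq-base")  # doctest: +SKIP
#   >>> enc = tokenizer(
#   ...     questions="What is love?",
#   ...     titles=["Haddaway", "Love (disambiguation)"],
#   ...     texts=["'What Is Love' is a song...", "Love is an emotion..."],
#   ...     padding=True, return_tensors="pt",
#   ... )  # doctest: +SKIP
#   >>> enc["input_ids"].shape[0]  # one row per passage  # doctest: +SKIP
#   2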
| 438 |
"""simple docstring"""
from __future__ import annotations
from math import pow, sqrt
def electrical_impedance(resistance: float, reactance: float, impedance: float) -> dict[str, float]:
    """Apply the electrical impedance formula Z = sqrt(R**2 + X**2): given any
    two of (resistance, reactance, impedance), compute the third. Pass 0 for
    the unknown quantity."""
    if (resistance, reactance, impedance).count(0) != 1:
        raise ValueError("One and only one argument must be 0")
    if resistance == 0:
        return {"resistance": sqrt(pow(impedance, 2) - pow(reactance, 2))}
    elif reactance == 0:
        return {"reactance": sqrt(pow(impedance, 2) - pow(resistance, 2))}
    elif impedance == 0:
        return {"impedance": sqrt(pow(resistance, 2) + pow(reactance, 2))}
    else:
        raise ValueError("Exactly one argument must be 0")
if __name__ == "__main__":
import doctest
doctest.testmod()
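# Worked example (added): a 3-4-5 right triangle of R and X gives |Z| = 5.
#
#   >>> electrical_impedance(3, 4, 0)
#   {'impedance': 5.0}
#   >>> electrical_impedance(0, 4, 5)
#   {'resistance': 3.0}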
| 438 | 1 |
'''simple docstring'''
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxSeq2SeqConfigWithPast
from ...utils import logging
logger = logging.get_logger(__name__)

UMT5_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"""google/umt5-small""": """https://huggingface.co/google/umt5-small/resolve/main/config.json""",
# See all umt5 models at https://huggingface.co/models?filter=umt5
}
class UMT5Config(PretrainedConfig):
    model_type = "umt5"
    keys_to_ignore_at_inference = ["past_key_values"]

    def __init__(
        self,
        vocab_size=250112,
        d_model=512,
        d_kv=64,
        d_ff=1024,
        num_layers=8,
        num_decoder_layers=None,
        num_heads=6,
        relative_attention_num_buckets=32,
        relative_attention_max_distance=128,
        dropout_rate=0.1,
        layer_norm_epsilon=1e-6,
        initializer_factor=1.0,
        feed_forward_proj="gated-gelu",
        is_encoder_decoder=True,
        use_cache=True,
        tokenizer_class="T5Tokenizer",
        tie_word_embeddings=True,
        pad_token_id=0,
        eos_token_id=1,
        decoder_start_token_id=0,
        **kwargs,
    ):
        super().__init__(
            is_encoder_decoder=is_encoder_decoder,
            tokenizer_class=tokenizer_class,
            tie_word_embeddings=tie_word_embeddings,
            pad_token_id=pad_token_id,
            eos_token_id=eos_token_id,
            decoder_start_token_id=decoder_start_token_id,
            **kwargs,
        )
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.d_kv = d_kv
        self.d_ff = d_ff
        self.num_layers = num_layers
        self.num_decoder_layers = (
            num_decoder_layers if num_decoder_layers is not None else self.num_layers
        )  # default = symmetry
        self.num_heads = num_heads
        self.relative_attention_num_buckets = relative_attention_num_buckets
        self.relative_attention_max_distance = relative_attention_max_distance
        self.dropout_rate = dropout_rate
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_factor = initializer_factor
        self.feed_forward_proj = feed_forward_proj
        self.use_cache = use_cache

        act_info = self.feed_forward_proj.split("-")
        self.dense_act_fn = act_info[-1]
        self.is_gated_act = act_info[0] == "gated"

        if len(act_info) > 1 and act_info[0] != "gated" or len(act_info) > 2:
            raise ValueError(
                f"`feed_forward_proj`: {feed_forward_proj} is not a valid activation function of the dense layer. "
                "Please make sure `feed_forward_proj` is of the format `gated-{ACT_FN}` or `{ACT_FN}`, e.g. "
                "'gated-gelu' or 'relu'"
            )

        if feed_forward_proj == "gated-gelu":
            self.dense_act_fn = "gelu_new"

    @property
    def hidden_size(self):
        return self.d_model

    @property
    def num_attention_heads(self):
        return self.num_heads

    @property
    def num_hidden_layers(self):
        return self.num_layers
class UMT5OnnxConfig(OnnxSeq2SeqConfigWithPast):
    @property
    # Copied from transformers.models.t5.configuration_t5.T5OnnxConfig.inputs
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        common_inputs = {
            "input_ids": {0: "batch", 1: "encoder_sequence"},
            "attention_mask": {0: "batch", 1: "encoder_sequence"},
        }
        if self.use_past:
            common_inputs["attention_mask"][1] = "past_encoder_sequence + sequence"
            common_inputs["decoder_input_ids"] = {0: "batch"}
            common_inputs["decoder_attention_mask"] = {0: "batch", 1: "past_decoder_sequence + sequence"}
        else:
            common_inputs["decoder_input_ids"] = {0: "batch", 1: "decoder_sequence"}
            common_inputs["decoder_attention_mask"] = {0: "batch", 1: "decoder_sequence"}

        if self.use_past:
            self.fill_with_past_key_values_(common_inputs, direction="inputs")

        return common_inputs

    @property
    # Copied from transformers.models.t5.configuration_t5.T5OnnxConfig.default_onnx_opset
    def default_onnx_opset(self) -> int:
        return 13

    @property
    def atol_for_validation(self) -> float:
        return 5e-4
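# Usage sketch (added for illustration): the properties alias T5-style names
# onto the generic config accessors.
#
#   >>> config = UMT5Config(d_model=768, num_heads=12)
#   >>> config.hidden_size, config.num_attention_heads
#   (768, 12)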
'''simple docstring'''
import gc
import random
import unittest
import torch
from diffusers import (
    IFImg2ImgPipeline,
    IFImg2ImgSuperResolutionPipeline,
    IFInpaintingPipeline,
    IFInpaintingSuperResolutionPipeline,
    IFPipeline,
    IFSuperResolutionPipeline,
)
from diffusers.models.attention_processor import AttnAddedKVProcessor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import floats_tensor, load_numpy, require_torch_gpu, skip_mps, slow, torch_device
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
from . import IFPipelineTesterMixin
@skip_mps
class IFPipelineFastTests(PipelineTesterMixin, IFPipelineTesterMixin, unittest.TestCase):
    pipeline_class = IFPipeline
    params = TEXT_TO_IMAGE_PARAMS - {"width", "height", "latents"}
    batch_params = TEXT_TO_IMAGE_BATCH_PARAMS
    required_optional_params = PipelineTesterMixin.required_optional_params - {"latents"}

    def get_dummy_components(self):
        return self._get_dummy_components()

    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)

        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "generator": generator,
            "num_inference_steps": 2,
            "output_type": "numpy",
        }
        return inputs

    def test_save_load_optional_components(self):
        self._test_save_load_optional_components()

    @unittest.skipIf(torch_device != "cuda", reason="float16 requires CUDA")
    def test_save_load_float16(self):
        # Due to non-determinism in save load of the hf-internal-testing/tiny-random-t5 text encoder
        super().test_save_load_float16(expected_max_diff=1e-1)

    def test_attention_slicing_forward_pass(self):
        self._test_attention_slicing_forward_pass(expected_max_diff=1e-2)

    def test_save_load_local(self):
        self._test_save_load_local()

    def test_inference_batch_single_identical(self):
        self._test_inference_batch_single_identical(expected_max_diff=1e-2)

    @unittest.skipIf(
        torch_device != "cuda" or not is_xformers_available(),
        reason="XFormers attention is only available with CUDA and `xformers` installed",
    )
    def test_xformers_attention_forwardGenerator_pass(self):
        self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1e-3)
@slow
@require_torch_gpu
class IFPipelineSlowTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    def test_all(self):
        # if
        pipe_1 = IFPipeline.from_pretrained("DeepFloyd/IF-I-XL-v1.0", variant="fp16", torch_dtype=torch.float16)
        pipe_2 = IFSuperResolutionPipeline.from_pretrained(
            "DeepFloyd/IF-II-L-v1.0", variant="fp16", torch_dtype=torch.float16, text_encoder=None, tokenizer=None
        )

        # pre compute text embeddings and remove T5 to save memory
        pipe_1.text_encoder.to("cuda")
        prompt_embeds, negative_prompt_embeds = pipe_1.encode_prompt("anime turtle", device="cuda")

        del pipe_1.tokenizer
        del pipe_1.text_encoder
        gc.collect()

        pipe_1.tokenizer = None
        pipe_1.text_encoder = None

        pipe_1.enable_model_cpu_offload()
        pipe_2.enable_model_cpu_offload()
        pipe_1.unet.set_attn_processor(AttnAddedKVProcessor())
        pipe_2.unet.set_attn_processor(AttnAddedKVProcessor())
        self._test_if(pipe_1, pipe_2, prompt_embeds, negative_prompt_embeds)

        pipe_1.remove_all_hooks()
        pipe_2.remove_all_hooks()

        # img2img
        pipe_1 = IFImg2ImgPipeline(**pipe_1.components)
        pipe_2 = IFImg2ImgSuperResolutionPipeline(**pipe_2.components)

        pipe_1.enable_model_cpu_offload()
        pipe_2.enable_model_cpu_offload()
        pipe_1.unet.set_attn_processor(AttnAddedKVProcessor())
        pipe_2.unet.set_attn_processor(AttnAddedKVProcessor())
        self._test_if_img2img(pipe_1, pipe_2, prompt_embeds, negative_prompt_embeds)

        pipe_1.remove_all_hooks()
        pipe_2.remove_all_hooks()

        # inpainting
        pipe_1 = IFInpaintingPipeline(**pipe_1.components)
        pipe_2 = IFInpaintingSuperResolutionPipeline(**pipe_2.components)

        pipe_1.enable_model_cpu_offload()
        pipe_2.enable_model_cpu_offload()
        pipe_1.unet.set_attn_processor(AttnAddedKVProcessor())
        pipe_2.unet.set_attn_processor(AttnAddedKVProcessor())
        self._test_if_inpainting(pipe_1, pipe_2, prompt_embeds, negative_prompt_embeds)
    def _test_if(self, pipe_1, pipe_2, prompt_embeds, negative_prompt_embeds):
        # pipeline 1
        _start_torch_memory_measurement()
        generator = torch.Generator(device="cpu").manual_seed(0)
        output = pipe_1(
            prompt_embeds=prompt_embeds,
            negative_prompt_embeds=negative_prompt_embeds,
            num_inference_steps=2,
            generator=generator,
            output_type="np",
        )
        image = output.images[0]
        assert image.shape == (64, 64, 3)
        mem_bytes = torch.cuda.max_memory_allocated()
        assert mem_bytes < 13 * 10**9
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if.npy"
        )
        assert_mean_pixel_difference(image, expected_image)

        # pipeline 2
        _start_torch_memory_measurement()
        generator = torch.Generator(device="cpu").manual_seed(0)
        image = floats_tensor((1, 3, 64, 64), rng=random.Random(0)).to(torch_device)
        output = pipe_2(
            prompt_embeds=prompt_embeds,
            negative_prompt_embeds=negative_prompt_embeds,
            image=image,
            generator=generator,
            num_inference_steps=2,
            output_type="np",
        )
        image = output.images[0]
        assert image.shape == (256, 256, 3)
        mem_bytes = torch.cuda.max_memory_allocated()
        assert mem_bytes < 4 * 10**9
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_superresolution_stage_II.npy"
        )
        assert_mean_pixel_difference(image, expected_image)
    def _test_if_img2img(self, pipe_1, pipe_2, prompt_embeds, negative_prompt_embeds):
        # pipeline 1
        _start_torch_memory_measurement()
        image = floats_tensor((1, 3, 64, 64), rng=random.Random(0)).to(torch_device)
        generator = torch.Generator(device="cpu").manual_seed(0)
        output = pipe_1(
            prompt_embeds=prompt_embeds,
            negative_prompt_embeds=negative_prompt_embeds,
            image=image,
            num_inference_steps=2,
            generator=generator,
            output_type="np",
        )
        image = output.images[0]
        assert image.shape == (64, 64, 3)
        mem_bytes = torch.cuda.max_memory_allocated()
        assert mem_bytes < 10 * 10**9
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_img2img.npy"
        )
        assert_mean_pixel_difference(image, expected_image)

        # pipeline 2
        _start_torch_memory_measurement()
        generator = torch.Generator(device="cpu").manual_seed(0)
        original_image = floats_tensor((1, 3, 256, 256), rng=random.Random(0)).to(torch_device)
        image = floats_tensor((1, 3, 64, 64), rng=random.Random(0)).to(torch_device)
        output = pipe_2(
            prompt_embeds=prompt_embeds,
            negative_prompt_embeds=negative_prompt_embeds,
            image=image,
            original_image=original_image,
            generator=generator,
            num_inference_steps=2,
            output_type="np",
        )
        image = output.images[0]
        assert image.shape == (256, 256, 3)
        mem_bytes = torch.cuda.max_memory_allocated()
        assert mem_bytes < 4 * 10**9
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_img2img_superresolution_stage_II.npy"
        )
        assert_mean_pixel_difference(image, expected_image)
    def _test_if_inpainting(self, pipe_1, pipe_2, prompt_embeds, negative_prompt_embeds):
        # pipeline 1
        _start_torch_memory_measurement()
        image = floats_tensor((1, 3, 64, 64), rng=random.Random(0)).to(torch_device)
        mask_image = floats_tensor((1, 3, 64, 64), rng=random.Random(1)).to(torch_device)
        generator = torch.Generator(device="cpu").manual_seed(0)
        output = pipe_1(
            prompt_embeds=prompt_embeds,
            negative_prompt_embeds=negative_prompt_embeds,
            image=image,
            mask_image=mask_image,
            num_inference_steps=2,
            generator=generator,
            output_type="np",
        )
        image = output.images[0]
        assert image.shape == (64, 64, 3)
        mem_bytes = torch.cuda.max_memory_allocated()
        assert mem_bytes < 10 * 10**9
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_inpainting.npy"
        )
        assert_mean_pixel_difference(image, expected_image)

        # pipeline 2
        _start_torch_memory_measurement()
        generator = torch.Generator(device="cpu").manual_seed(0)
        image = floats_tensor((1, 3, 64, 64), rng=random.Random(0)).to(torch_device)
        original_image = floats_tensor((1, 3, 256, 256), rng=random.Random(0)).to(torch_device)
        mask_image = floats_tensor((1, 3, 256, 256), rng=random.Random(1)).to(torch_device)
        output = pipe_2(
            prompt_embeds=prompt_embeds,
            negative_prompt_embeds=negative_prompt_embeds,
            image=image,
            mask_image=mask_image,
            original_image=original_image,
            generator=generator,
            num_inference_steps=2,
            output_type="np",
        )
        image = output.images[0]
        assert image.shape == (256, 256, 3)
        mem_bytes = torch.cuda.max_memory_allocated()
        assert mem_bytes < 4 * 10**9
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_inpainting_superresolution_stage_II.npy"
        )
        assert_mean_pixel_difference(image, expected_image)
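
# Helper used by the slow tests above to reset CUDA memory statistics between pipeline stages.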
def _start_torch_memory_measurement():
    torch.cuda.empty_cache()
    torch.cuda.reset_max_memory_allocated()
    torch.cuda.reset_peak_memory_stats() | 270 | 0 |
'''simple docstring'''
from __future__ import annotations
from math import pi, sqrt
def resonant_frequency(inductance: float, capacitance: float) -> tuple:
    """Compute the resonant frequency of an LC circuit: f = 1 / (2 * pi * sqrt(L * C))."""
if inductance <= 0:
raise ValueError("Inductance cannot be 0 or negative" )
elif capacitance <= 0:
raise ValueError("Capacitance cannot be 0 or negative" )
else:
return (
"Resonant frequency",
float(1 / (2 * pi * (sqrt(inductance * capacitance ))) ),
)
if __name__ == "__main__":
import doctest
doctest.testmod()
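    # A quick sanity check of resonant_frequency; the component values are illustrative only:
    # 10 H and 5 F give f = 1 / (2 * pi * sqrt(50)) ≈ 0.0225 Hz
    print(resonant_frequency(inductance=10, capacitance=5))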
| 435 | '''simple docstring'''
import json
import os
import re
import unittest
from transformers import CodeGenTokenizer, CodeGenTokenizerFast
from transformers.models.codegen.tokenization_codegen import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class CodeGenTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = CodeGenTokenizer
    rust_tokenizer_class = CodeGenTokenizerFast
    test_rust_tokenizer = True
    from_pretrained_kwargs = {"add_prefix_space": True}
    test_seq2seq = False
    def setUp(self):
        super().setUp()
        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = [
            "l",
            "o",
            "w",
            "e",
            "r",
            "s",
            "t",
            "i",
            "d",
            "n",
            "\u0120",
            "\u0120l",
            "\u0120n",
            "\u0120lo",
            "\u0120low",
            "er",
            "\u0120lowest",
            "\u0120newer",
            "\u0120wider",
            "<unk>",
            "<|endoftext|>",
        ]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""]
        self.special_tokens_map = {"unk_token": "<unk>"}
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))
    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return CodeGenTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return CodeGenTokenizerFast.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = "lower newer"
        output_text = "lower newer"
        return input_text, output_text
    def test_full_tokenizer(self):
        tokenizer = CodeGenTokenizer(self.vocab_file, self.merges_file, **self.special_tokens_map)
        text = "lower newer"
        bpe_tokens = ["\u0120low", "er", "\u0120", "n", "e", "w", "er"]
        tokens = tokenizer.tokenize(text, add_prefix_space=True)
        self.assertListEqual(tokens, bpe_tokens)

        input_tokens = tokens + [tokenizer.unk_token]
        input_bpe_tokens = [14, 15, 10, 9, 3, 2, 15, 19]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)
    def test_rust_and_python_full_tokenizers(self):
        if not self.test_rust_tokenizer:
            return

        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer(add_prefix_space=True)
        sequence = "lower newer"

        # Testing tokenization
        tokens = tokenizer.tokenize(sequence, add_prefix_space=True)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)

        # Testing conversion to ids without special tokens
        ids = tokenizer.encode(sequence, add_special_tokens=False, add_prefix_space=True)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)

        # Testing conversion to ids with special tokens
        rust_tokenizer = self.get_rust_tokenizer(add_prefix_space=True)
        ids = tokenizer.encode(sequence, add_prefix_space=True)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)

        # Testing the unknown token
        input_tokens = tokens + [rust_tokenizer.unk_token]
        input_bpe_tokens = [14, 15, 10, 9, 3, 2, 15, 19]
        self.assertListEqual(rust_tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)
    def test_pretokenized_inputs(self, *args, **kwargs):
        pass
    def test_padding(self, max_length=15):
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)

                # Simple input
                s = "This is a simple input"
                s2 = ["This is a simple input 1", "This is a simple input 2"]
                p = ("This is a simple input", "This is a pair")
                p2 = [
                    ("This is a simple input 1", "This is a simple input 2"),
                    ("This is a simple pair 1", "This is a simple pair 2"),
                ]

                # Simple input tests
                self.assertRaises(ValueError, tokenizer_r.encode, s, max_length=max_length, padding="max_length")
                # Simple input
                self.assertRaises(ValueError, tokenizer_r.encode_plus, s, max_length=max_length, padding="max_length")
                # Simple input
                self.assertRaises(
                    ValueError, tokenizer_r.batch_encode_plus, s2, max_length=max_length, padding="max_length",
                )
                # Pair input
                self.assertRaises(ValueError, tokenizer_r.encode, p, max_length=max_length, padding="max_length")
                # Pair input
                self.assertRaises(ValueError, tokenizer_r.encode_plus, p, max_length=max_length, padding="max_length")
                # Pair input
                self.assertRaises(
                    ValueError, tokenizer_r.batch_encode_plus, p2, max_length=max_length, padding="max_length",
                )
    def test_padding_if_pad_token_set_slow(self):
        tokenizer = CodeGenTokenizer.from_pretrained(self.tmpdirname, pad_token="<pad>")

        # Simple input
        s = "This is a simple input"
        s2 = ["This is a simple input looooooooong", "This is a simple input"]
        p = ("This is a simple input", "This is a pair")
        p2 = [
            ("This is a simple input loooooong", "This is a simple input"),
            ("This is a simple pair loooooong", "This is a simple pair"),
        ]
        pad_token_id = tokenizer.pad_token_id

        out_s = tokenizer(s, padding="max_length", max_length=30, return_tensors="np")
        out_s2 = tokenizer(s2, padding=True, truncate=True, return_tensors="np")
        out_p = tokenizer(*p, padding="max_length", max_length=60, return_tensors="np")
        out_p2 = tokenizer(p2, padding=True, truncate=True, return_tensors="np")

        # s
        # test single string max_length padding
        self.assertEqual(out_s["input_ids"].shape[-1], 30)
        self.assertTrue(pad_token_id in out_s["input_ids"])
        self.assertTrue(0 in out_s["attention_mask"])

        # s2
        # test automatic padding
        self.assertEqual(out_s2["input_ids"].shape[-1], 33)
        # long slice doesn't have padding
        self.assertFalse(pad_token_id in out_s2["input_ids"][0])
        self.assertFalse(0 in out_s2["attention_mask"][0])
        # short slice does have padding
        self.assertTrue(pad_token_id in out_s2["input_ids"][1])
        self.assertTrue(0 in out_s2["attention_mask"][1])

        # p
        # test single pair max_length padding
        self.assertEqual(out_p["input_ids"].shape[-1], 60)
        self.assertTrue(pad_token_id in out_p["input_ids"])
        self.assertTrue(0 in out_p["attention_mask"])

        # p2
        # test automatic padding pair
        self.assertEqual(out_p2["input_ids"].shape[-1], 52)
        # long slice pair doesn't have padding
        self.assertFalse(pad_token_id in out_p2["input_ids"][0])
        self.assertFalse(0 in out_p2["attention_mask"][0])
        # short slice pair does have padding
        self.assertTrue(pad_token_id in out_p2["input_ids"][1])
        self.assertTrue(0 in out_p2["attention_mask"][1])
    def test_add_bos_token_slow(self):
        bos_token = "$$$"
        tokenizer = CodeGenTokenizer.from_pretrained(self.tmpdirname, bos_token=bos_token, add_bos_token=True)

        s = "This is a simple input"
        s2 = ["This is a simple input 1", "This is a simple input 2"]

        bos_token_id = tokenizer.bos_token_id

        out_s = tokenizer(s)
        out_s2 = tokenizer(s2)

        self.assertEqual(out_s.input_ids[0], bos_token_id)
        self.assertTrue(all(o[0] == bos_token_id for o in out_s2.input_ids))

        decode_s = tokenizer.decode(out_s.input_ids)
        decode_s2 = tokenizer.batch_decode(out_s2.input_ids)

        self.assertEqual(decode_s.split()[0], bos_token)
        self.assertTrue(all(d.split()[0] == bos_token for d in decode_s2))
    @slow
    def test_truncation(self):
        tokenizer = CodeGenTokenizer.from_pretrained("Salesforce/codegen-350M-mono")

        text = "\nif len_a > len_b:\n    result = a\nelse:\n    result = b\n\n\n\n#"
        expected_truncated_text = "\nif len_a > len_b:\n    result = a\nelse:\n    result = b"

        input_ids = tokenizer.encode(text)
        truncate_before_pattern = ["^#", re.escape("<|endoftext|>"), "^'''", '^"""', "\n\n\n"]
        decoded_text = tokenizer.decode(input_ids, truncate_before_pattern=truncate_before_pattern)
        self.assertEqual(decoded_text, expected_truncated_text)
    def test_padding_different_model_input_name(self):
        pass
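
# A hedged usage sketch of the truncation feature exercised above; it needs the
# Salesforce/codegen-350M-mono checkpoint, so it is left as commented code:
#   tokenizer = CodeGenTokenizer.from_pretrained("Salesforce/codegen-350M-mono")
#   ids = tokenizer.encode("result = a\n\n\n\n# trailing comment")
#   tokenizer.decode(ids, truncate_before_pattern=["^#", "\n\n\n"])  # drops everything from the comment on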
| 435 | 1 |
"""simple docstring"""
_A = {
"meter": "m",
"kilometer": "km",
"megametre": "Mm",
"gigametre": "Gm",
"terametre": "Tm",
"petametre": "Pm",
"exametre": "Em",
"zettametre": "Zm",
"yottametre": "Ym",
}
# Exponent of the factor(meter)
_A = {
"m": 0,
"km": 3,
"Mm": 6,
"Gm": 9,
"Tm": 12,
"Pm": 15,
"Em": 18,
"Zm": 21,
"Ym": 24,
}
def lowercase (_snake_case ,_snake_case ,_snake_case ) -> float:
'''simple docstring'''
__UpperCamelCase = from_type.lower().strip("s" )
__UpperCamelCase = to_type.lower().strip("s" )
__UpperCamelCase = UNIT_SYMBOL.get(_snake_case ,_snake_case )
__UpperCamelCase = UNIT_SYMBOL.get(_snake_case ,_snake_case )
if from_sanitized not in METRIC_CONVERSION:
__UpperCamelCase = (
f"""Invalid 'from_type' value: {from_type!r}.\n"""
f"""Conversion abbreviations are: {", ".join(_snake_case )}"""
)
raise ValueError(_snake_case )
if to_sanitized not in METRIC_CONVERSION:
__UpperCamelCase = (
f"""Invalid 'to_type' value: {to_type!r}.\n"""
f"""Conversion abbreviations are: {", ".join(_snake_case )}"""
)
raise ValueError(_snake_case )
__UpperCamelCase = METRIC_CONVERSION[from_sanitized]
__UpperCamelCase = METRIC_CONVERSION[to_sanitized]
__UpperCamelCase = 1
if from_exponent > to_exponent:
__UpperCamelCase = from_exponent - to_exponent
else:
__UpperCamelCase = -(to_exponent - from_exponent)
return value * pow(10 ,_snake_case )
if __name__ == "__main__":
from doctest import testmod
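    # Minimal usage check for length_conversion above (values are illustrative):
    print(length_conversion(4, "kilometer", "meter"))  # 4000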
testmod() | 228 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_A = {
"configuration_mask2former": [
"MASK2FORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
"Mask2FormerConfig",
],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_A = ["Mask2FormerImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_A = [
"MASK2FORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
"Mask2FormerForUniversalSegmentation",
"Mask2FormerModel",
"Mask2FormerPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_maskaformer import MASK2FORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, MaskaFormerConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_maskaformer import MaskaFormerImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_maskaformer import (
MASK2FORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
MaskaFormerForUniversalSegmentation,
MaskaFormerModel,
MaskaFormerPreTrainedModel,
)
else:
import sys
_A = _LazyModule(__name__, globals()["__file__"], _import_structure) | 228 | 1 |
import math


def is_prime(number: int) -> bool:
    """Return True if 'number' is prime, else False."""
    assert isinstance(number, int) and (
        number >= 0
    ), "'number' must been an int and positive"

    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or not number % 2:
        # Negatives, 0, 1 and all even numbers are not primes
        return False

    odd_numbers = range(3, int(math.sqrt(number) + 1), 2)
    return not any(not number % i for i in odd_numbers)


def next_prime(value, factor=1, **kwargs):
    value = factor * value
    first_value_val = value

    while not is_prime(value):
        value += 1 if not ("desc" in kwargs and kwargs["desc"] is True) else -1

    if value == first_value_val:
        return next_prime(value + 1, **kwargs)
    return value
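

# A quick usage sketch; the outputs assume the reconstruction above is faithful.
if __name__ == "__main__":
    print(next_prime(14))  # 17: walks upward from 14 to the first prime
    print(next_prime(7))   # 11: for a prime input, the helper recurses past it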
| 60 |
import copy
import os
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Dict, Mapping, Optional, Union
if TYPE_CHECKING:
from ...processing_utils import ProcessorMixin
from ...utils import TensorType
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

OWLVIT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'''google/owlvit-base-patch32''': '''https://huggingface.co/google/owlvit-base-patch32/resolve/main/config.json''',
'''google/owlvit-base-patch16''': '''https://huggingface.co/google/owlvit-base-patch16/resolve/main/config.json''',
'''google/owlvit-large-patch14''': '''https://huggingface.co/google/owlvit-large-patch14/resolve/main/config.json''',
}
class OwlViTTextConfig(PretrainedConfig):
    model_type = "owlvit_text_model"

    def __init__(
        self,
        vocab_size=49408,
        hidden_size=512,
        intermediate_size=2048,
        num_hidden_layers=12,
        num_attention_heads=8,
        max_position_embeddings=16,
        hidden_act="quick_gelu",
        layer_norm_eps=1e-5,
        attention_dropout=0.0,
        initializer_range=0.02,
        initializer_factor=1.0,
        pad_token_id=0,
        bos_token_id=49406,
        eos_token_id=49407,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.max_position_embeddings = max_position_embeddings
        self.hidden_act = hidden_act
        self.layer_norm_eps = layer_norm_eps
        self.attention_dropout = attention_dropout
        self.initializer_range = initializer_range
        self.initializer_factor = initializer_factor
    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs) -> "PretrainedConfig":
        cls._set_token_in_kwargs(kwargs)

        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)

        # get the text config dict if we are loading from OwlViTConfig
        if config_dict.get("model_type") == "owlvit":
            config_dict = config_dict["text_config"]

        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
            )

        return cls.from_dict(config_dict, **kwargs)
class OwlViTVisionConfig(PretrainedConfig):
    model_type = "owlvit_vision_model"

    def __init__(
        self,
        hidden_size=768,
        intermediate_size=3072,
        num_hidden_layers=12,
        num_attention_heads=12,
        num_channels=3,
        image_size=768,
        patch_size=32,
        hidden_act="quick_gelu",
        layer_norm_eps=1e-5,
        attention_dropout=0.0,
        initializer_range=0.02,
        initializer_factor=1.0,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.num_channels = num_channels
        self.image_size = image_size
        self.patch_size = patch_size
        self.hidden_act = hidden_act
        self.layer_norm_eps = layer_norm_eps
        self.attention_dropout = attention_dropout
        self.initializer_range = initializer_range
        self.initializer_factor = initializer_factor
    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs) -> "PretrainedConfig":
        cls._set_token_in_kwargs(kwargs)

        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)

        # get the vision config dict if we are loading from OwlViTConfig
        if config_dict.get("model_type") == "owlvit":
            config_dict = config_dict["vision_config"]

        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
            )

        return cls.from_dict(config_dict, **kwargs)
class OwlViTConfig(PretrainedConfig):
    model_type = "owlvit"
    is_composition = True

    def __init__(
        self,
        text_config=None,
        vision_config=None,
        projection_dim=512,
        logit_scale_init_value=2.6592,
        return_dict=True,
        **kwargs,
    ):
        super().__init__(**kwargs)
        if text_config is None:
            text_config = {}
            logger.info("text_config is None. Initializing the OwlViTTextConfig with default values.")
        if vision_config is None:
            vision_config = {}
            logger.info("vision_config is None. initializing the OwlViTVisionConfig with default values.")
        self.text_config = OwlViTTextConfig(**text_config)
        self.vision_config = OwlViTVisionConfig(**vision_config)
        self.projection_dim = projection_dim
        self.logit_scale_init_value = logit_scale_init_value
        self.return_dict = return_dict
        self.initializer_factor = 1.0
    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs) -> "PretrainedConfig":
        cls._set_token_in_kwargs(kwargs)

        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)

        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
            )

        return cls.from_dict(config_dict, **kwargs)

    @classmethod
    def from_text_vision_configs(cls, text_config: Dict, vision_config: Dict, **kwargs):
        config_dict = {}
        config_dict["text_config"] = text_config
        config_dict["vision_config"] = vision_config
        return cls.from_dict(config_dict, **kwargs)

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        output["text_config"] = self.text_config.to_dict()
        output["vision_config"] = self.vision_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
class OwlViTOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("input_ids", {0: "batch", 1: "sequence"}),
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
                ("attention_mask", {0: "batch", 1: "sequence"}),
            ]
        )

    @property
    def outputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("logits_per_image", {0: "batch"}),
                ("logits_per_text", {0: "batch"}),
                ("text_embeds", {0: "batch"}),
                ("image_embeds", {0: "batch"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-4

    def generate_dummy_inputs(
        self,
        processor: "ProcessorMixin",
        batch_size: int = -1,
        seq_length: int = -1,
        framework: Optional["TensorType"] = None,
    ) -> Mapping[str, Any]:
        text_input_dict = super().generate_dummy_inputs(
            processor.tokenizer, batch_size=batch_size, seq_length=seq_length, framework=framework
        )
        image_input_dict = super().generate_dummy_inputs(
            processor.image_processor, batch_size=batch_size, framework=framework
        )
        return {**text_input_dict, **image_input_dict}

    @property
    def default_onnx_opset(self) -> int:
        return 14
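
# A hedged usage sketch of the configuration classes above (attribute values follow the defaults):
#   config = OwlViTConfig()          # builds default text and vision sub-configs
#   config.text_config.vocab_size    # 49408
#   config.vision_config.image_size  # 768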
| 60 | 1 |
from ...utils import is_torch_available, is_transformers_available
if is_transformers_available() and is_torch_available():
from .pipeline_vq_diffusion import LearnedClassifierFreeSamplingEmbeddings, VQDiffusionPipeline
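
# A hedged usage sketch of the re-exported pipeline; the model id is the public
# VQ-Diffusion checkpoint and the prompt is illustrative:
#   import torch
#   pipe = VQDiffusionPipeline.from_pretrained("microsoft/vq-diffusion-ithq", torch_dtype=torch.float16)
#   image = pipe("teddy bear playing in the pool").images[0]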
| 611 |
import math
import os
import unittest
from transformers import MegatronBertConfig, is_torch_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_PRETRAINING_MAPPING,
MegatronBertForCausalLM,
MegatronBertForMaskedLM,
MegatronBertForMultipleChoice,
MegatronBertForNextSentencePrediction,
MegatronBertForPreTraining,
MegatronBertForQuestionAnswering,
MegatronBertForSequenceClassification,
MegatronBertForTokenClassification,
MegatronBertModel,
)
class MegatronBertModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=64,
        embedding_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.embedding_size = embedding_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()

        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels

    def get_config(self):
        return MegatronBertConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            embedding_size=self.embedding_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            is_decoder=False,
            initializer_range=self.initializer_range,
        )
    def create_and_check_megatron_bert_model(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = MegatronBertModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)
        result = model(input_ids, token_type_ids=token_type_ids)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))

    def create_and_check_megatron_bert_for_masked_lm(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = MegatronBertForMaskedLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_megatron_bert_for_causal_lm(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = MegatronBertForCausalLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_megatron_bert_for_next_sequence_prediction(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = MegatronBertForNextSentencePrediction(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            labels=sequence_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, 2))

    def create_and_check_megatron_bert_for_pretraining(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = MegatronBertForPreTraining(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            labels=token_labels,
            next_sentence_label=sequence_labels,
        )
        self.parent.assertEqual(result.prediction_logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
        self.parent.assertEqual(result.seq_relationship_logits.shape, (self.batch_size, 2))

    def create_and_check_megatron_bert_for_question_answering(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = MegatronBertForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            start_positions=sequence_labels,
            end_positions=sequence_labels,
        )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def create_and_check_megatron_bert_for_sequence_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = MegatronBertForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=sequence_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_megatron_bert_for_token_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = MegatronBertForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_megatron_bert_for_multiple_choice(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_choices = self.num_choices
        model = MegatronBertForMultipleChoice(config=config)
        model.to(torch_device)
        model.eval()
        multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_token_type_ids = token_type_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        result = model(
            multiple_choice_inputs_ids,
            attention_mask=multiple_choice_input_mask,
            token_type_ids=multiple_choice_token_type_ids,
            labels=choice_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class MegatronBertModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
(
MegatronBertModel,
MegatronBertForMaskedLM,
MegatronBertForCausalLM,
MegatronBertForMultipleChoice,
MegatronBertForNextSentencePrediction,
MegatronBertForPreTraining,
MegatronBertForQuestionAnswering,
MegatronBertForSequenceClassification,
MegatronBertForTokenClassification,
)
if is_torch_available()
else ()
)
    pipeline_model_mapping = (
{
'feature-extraction': MegatronBertModel,
'fill-mask': MegatronBertForMaskedLM,
'question-answering': MegatronBertForQuestionAnswering,
'text-classification': MegatronBertForSequenceClassification,
'text-generation': MegatronBertForCausalLM,
'token-classification': MegatronBertForTokenClassification,
'zero-shot': MegatronBertForSequenceClassification,
}
if is_torch_available()
else {}
)
    fx_compatible = True
    # test_resize_embeddings = False
    test_head_masking = False
    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)
        if return_labels:
            if model_class in get_values(MODEL_FOR_PRETRAINING_MAPPING):
                inputs_dict["labels"] = torch.zeros(
                    (self.model_tester.batch_size, self.model_tester.seq_length), dtype=torch.long, device=torch_device
                )
                inputs_dict["next_sentence_label"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )
        return inputs_dict
    def setUp(self):
        self.model_tester = MegatronBertModelTester(self)
        self.config_tester = ConfigTester(self, config_class=MegatronBertConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()
    def test_megatron_bert_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_megatron_bert_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_megatron_bert_for_masked_lm(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_megatron_bert_for_multiple_choice(*config_and_inputs)

    def test_for_next_sequence_prediction(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_megatron_bert_for_next_sequence_prediction(*config_and_inputs)

    def test_for_pretraining(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_megatron_bert_for_pretraining(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_megatron_bert_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_megatron_bert_for_sequence_classification(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_megatron_bert_for_token_classification(*config_and_inputs)
def _long_tensor(tok_lst):
    return torch.tensor(
        tok_lst,
        dtype=torch.long,
        device=torch_device,
    )


TOLERANCE = 1e-4
@require_torch
@require_sentencepiece
@require_tokenizers
class MegatronBertModelIntegrationTests(unittest.TestCase):
    @slow
    @unittest.skip("Model is not available.")
    def test_inference_no_head(self):
        directory = "nvidia/megatron-bert-uncased-345m"
        if "MYDIR" in os.environ:
            directory = os.path.join(os.environ["MYDIR"], directory)
        model = MegatronBertModel.from_pretrained(directory)
        model.to(torch_device)
        model.half()
        input_ids = _long_tensor([[101, 7110, 1005, 1056, 2023, 11333, 17413, 1029, 102]])
        with torch.no_grad():
            output = model(input_ids)[0]
        expected_shape = torch.Size((1, 9, 1024))
        self.assertEqual(output.shape, expected_shape)
        expected = [-0.6040, -0.2517, -0.1025, 0.3420, -0.6758, -0.0017, -0.1089, -0.1990, 0.5728]
        for ii in range(3):
            for jj in range(3):
                a = output[0, ii, jj]
                b = expected[3 * ii + jj]
                msg = "ii={} jj={} a={} b={}".format(ii, jj, a, b)
                self.assertTrue(math.isclose(a, b, rel_tol=TOLERANCE, abs_tol=TOLERANCE), msg=msg)
| 611 | 1 |
from typing import Optional
import pyspark
from .. import Features, NamedSplit
from ..download import DownloadMode
from ..packaged_modules.spark.spark import Spark
from .abc import AbstractDatasetReader
class SparkDatasetReader(AbstractDatasetReader):
    def __init__(self, df: pyspark.sql.DataFrame, split: Optional[NamedSplit] = None, features: Optional[Features] = None,
                 streaming: bool = True, cache_dir: str = None, keep_in_memory: bool = False, working_dir: str = None,
                 load_from_cache_file: bool = True, file_format: str = "arrow", **kwargs):
        super().__init__(split=split, features=features, cache_dir=cache_dir, keep_in_memory=keep_in_memory,
                         streaming=streaming, **kwargs)
        self._load_from_cache_file = load_from_cache_file
        self._file_format = file_format
        self.builder = Spark(df=df, features=features, cache_dir=cache_dir, working_dir=working_dir, **kwargs)

    def read(self):
        if self.streaming:
            return self.builder.as_streaming_dataset(split=self.split)
        download_mode = None if self._load_from_cache_file else DownloadMode.FORCE_REDOWNLOAD
        self.builder.download_and_prepare(download_mode=download_mode, file_format=self._file_format)
        return self.builder.as_dataset(split=self.split)
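
# A hedged usage sketch; assumes an active SparkSession named `spark` (illustrative only):
#   df = spark.createDataFrame([("hello",), ("world",)], ["text"])
#   ds = SparkDatasetReader(df, cache_dir="/tmp/hf_cache").read()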
| 0 |
"""simple docstring"""
__version__ = "0.21.0"
from .accelerator import Accelerator
from .big_modeling import (
cpu_offload,
cpu_offload_with_hook,
disk_offload,
dispatch_model,
init_empty_weights,
init_on_device,
load_checkpoint_and_dispatch,
)
from .data_loader import skip_first_batches
from .launchers import debug_launcher, notebook_launcher
from .state import PartialState
from .utils import (
DeepSpeedPlugin,
DistributedDataParallelKwargs,
DistributedType,
FullyShardedDataParallelPlugin,
GradScalerKwargs,
InitProcessGroupKwargs,
find_executable_batch_size,
infer_auto_device_map,
is_rich_available,
load_checkpoint_in_model,
synchronize_rng_states,
)
if is_rich_available():
from .utils import rich
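
# A minimal usage sketch of the re-exported API; `model`, `optimizer`, and `dataloader`
# are assumed to exist (illustrative only):
#   accelerator = Accelerator()
#   model, optimizer, dataloader = accelerator.prepare(model, optimizer, dataloader)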
| 196 | 0 |
"""simple docstring"""
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_xlnet import XLNetTokenizer
else:
    XLNetTokenizer = None

logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "spiece.model", "tokenizer_file": "tokenizer.json"}
PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "xlnet-base-cased": "https://huggingface.co/xlnet-base-cased/resolve/main/spiece.model",
        "xlnet-large-cased": "https://huggingface.co/xlnet-large-cased/resolve/main/spiece.model",
    },
    "tokenizer_file": {
        "xlnet-base-cased": "https://huggingface.co/xlnet-base-cased/resolve/main/tokenizer.json",
        "xlnet-large-cased": "https://huggingface.co/xlnet-large-cased/resolve/main/tokenizer.json",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "xlnet-base-cased": None,
    "xlnet-large-cased": None,
}

SPIECE_UNDERLINE = "▁"

# Segments (not really needed)
SEG_ID_A = 0
SEG_ID_B = 1
SEG_ID_CLS = 2
SEG_ID_SEP = 3
SEG_ID_PAD = 4
class XLNetTokenizerFast(PreTrainedTokenizerFast):
    """Fast XLNet tokenizer (backed by HuggingFace's tokenizers library), based on SentencePiece."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    padding_side = "left"
    slow_tokenizer_class = XLNetTokenizer

    def __init__(self, vocab_file=None, tokenizer_file=None, do_lower_case=False, remove_space=True, keep_accents=False,
                 bos_token="<s>", eos_token="</s>", unk_token="<unk>", sep_token="<sep>", pad_token="<pad>",
                 cls_token="<cls>", mask_token="<mask>", additional_special_tokens=["<eop>", "<eod>"], **kwargs):
        # Mask token behaves like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token
        super().__init__(vocab_file=vocab_file, tokenizer_file=tokenizer_file, do_lower_case=do_lower_case,
                         remove_space=remove_space, keep_accents=keep_accents, bos_token=bos_token, eos_token=eos_token,
                         unk_token=unk_token, sep_token=sep_token, pad_token=pad_token, cls_token=cls_token,
                         mask_token=mask_token, additional_special_tokens=additional_special_tokens, **kwargs)
        self._pad_token_type_id = 3
        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.keep_accents = keep_accents
        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True
    def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return token_ids_0 + sep + cls
        return token_ids_0 + sep + token_ids_1 + sep + cls
    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None):
        sep = [self.sep_token_id]
        cls_segment_id = [2]
        if token_ids_1 is None:
            return len(token_ids_0 + sep) * [0] + cls_segment_id
        return len(token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1] + cls_segment_id
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                "Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
                "tokenizer."
            )
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        return (out_vocab_file,)
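
# A short usage sketch (the checkpoint ids are the ones referenced in the maps above):
#   tokenizer = XLNetTokenizerFast.from_pretrained("xlnet-base-cased")
#   tokenizer("Hello world")["input_ids"]  # ids end with the <sep> and <cls> tokens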
| 363 |
"""simple docstring"""
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import cached_download, hf_hub_url
from PIL import Image
from transformers import DPTConfig, DPTForDepthEstimation, DPTForSemanticSegmentation, DPTImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def get_dpt_config(checkpoint_url):
    config = DPTConfig()
    if "large" in checkpoint_url:
        config.hidden_size = 1024
        config.intermediate_size = 4096
        config.num_hidden_layers = 24
        config.num_attention_heads = 16
        config.backbone_out_indices = [5, 11, 17, 23]
        config.neck_hidden_sizes = [256, 512, 1024, 1024]
        expected_shape = (1, 384, 384)
    if "ade" in checkpoint_url:
        config.use_batch_norm_in_fusion_residual = True
        config.num_labels = 150
        repo_id = "huggingface/label-files"
        filename = "ade20k-id2label.json"
        id2label = json.load(open(cached_download(hf_hub_url(repo_id, filename, repo_type="dataset")), "r"))
        id2label = {int(k): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}
        expected_shape = [1, 150, 480, 480]
    return config, expected_shape
def remove_ignore_keys_(state_dict):
    ignore_keys = ["pretrained.model.head.weight", "pretrained.model.head.bias"]
    for k in ignore_keys:
        state_dict.pop(k, None)
def rename_key(name):
    if (
        "pretrained.model" in name
        and "cls_token" not in name
        and "pos_embed" not in name
        and "patch_embed" not in name
    ):
        name = name.replace("pretrained.model", "dpt.encoder")
    if "pretrained.model" in name:
        name = name.replace("pretrained.model", "dpt.embeddings")
    if "patch_embed" in name:
        name = name.replace("patch_embed", "patch_embeddings")
    if "pos_embed" in name:
        name = name.replace("pos_embed", "position_embeddings")
    if "attn.proj" in name:
        name = name.replace("attn.proj", "attention.output.dense")
    if "proj" in name and "project" not in name:
        name = name.replace("proj", "projection")
    if "blocks" in name:
        name = name.replace("blocks", "layer")
    if "mlp.fc1" in name:
        name = name.replace("mlp.fc1", "intermediate.dense")
    if "mlp.fc2" in name:
        name = name.replace("mlp.fc2", "output.dense")
    if "norm1" in name:
        name = name.replace("norm1", "layernorm_before")
    if "norm2" in name:
        name = name.replace("norm2", "layernorm_after")
    if "scratch.output_conv" in name:
        name = name.replace("scratch.output_conv", "head")
    if "scratch" in name:
        name = name.replace("scratch", "neck")
    if "layer1_rn" in name:
        name = name.replace("layer1_rn", "convs.0")
    if "layer2_rn" in name:
        name = name.replace("layer2_rn", "convs.1")
    if "layer3_rn" in name:
        name = name.replace("layer3_rn", "convs.2")
    if "layer4_rn" in name:
        name = name.replace("layer4_rn", "convs.3")
    if "refinenet" in name:
        layer_idx = int(name[len("neck.refinenet") : len("neck.refinenet") + 1])
        # tricky here: we need to map 4 to 0, 3 to 1, 2 to 2 and 1 to 3
        name = name.replace(f"refinenet{layer_idx}", f"fusion_stage.layers.{abs(layer_idx-4)}")
    if "out_conv" in name:
        name = name.replace("out_conv", "projection")
    if "resConfUnit1" in name:
        name = name.replace("resConfUnit1", "residual_layer1")
    if "resConfUnit2" in name:
        name = name.replace("resConfUnit2", "residual_layer2")
    if "conv1" in name:
        name = name.replace("conv1", "convolution1")
    if "conv2" in name:
        name = name.replace("conv2", "convolution2")
    # readout blocks
    if "pretrained.act_postprocess1.0.project.0" in name:
        name = name.replace("pretrained.act_postprocess1.0.project.0", "neck.reassemble_stage.readout_projects.0.0")
    if "pretrained.act_postprocess2.0.project.0" in name:
        name = name.replace("pretrained.act_postprocess2.0.project.0", "neck.reassemble_stage.readout_projects.1.0")
    if "pretrained.act_postprocess3.0.project.0" in name:
        name = name.replace("pretrained.act_postprocess3.0.project.0", "neck.reassemble_stage.readout_projects.2.0")
    if "pretrained.act_postprocess4.0.project.0" in name:
        name = name.replace("pretrained.act_postprocess4.0.project.0", "neck.reassemble_stage.readout_projects.3.0")
    # resize blocks
    if "pretrained.act_postprocess1.3" in name:
        name = name.replace("pretrained.act_postprocess1.3", "neck.reassemble_stage.layers.0.projection")
    if "pretrained.act_postprocess1.4" in name:
        name = name.replace("pretrained.act_postprocess1.4", "neck.reassemble_stage.layers.0.resize")
    if "pretrained.act_postprocess2.3" in name:
        name = name.replace("pretrained.act_postprocess2.3", "neck.reassemble_stage.layers.1.projection")
    if "pretrained.act_postprocess2.4" in name:
        name = name.replace("pretrained.act_postprocess2.4", "neck.reassemble_stage.layers.1.resize")
    if "pretrained.act_postprocess3.3" in name:
        name = name.replace("pretrained.act_postprocess3.3", "neck.reassemble_stage.layers.2.projection")
    if "pretrained.act_postprocess4.3" in name:
        name = name.replace("pretrained.act_postprocess4.3", "neck.reassemble_stage.layers.3.projection")
    if "pretrained.act_postprocess4.4" in name:
        name = name.replace("pretrained.act_postprocess4.4", "neck.reassemble_stage.layers.3.resize")
    if "pretrained" in name:
        name = name.replace("pretrained", "dpt")
    if "bn" in name:
        name = name.replace("bn", "batch_norm")
    if "head" in name:
        name = name.replace("head", "head.head")
    if "encoder.norm" in name:
        name = name.replace("encoder.norm", "layernorm")
    if "auxlayer" in name:
        name = name.replace("auxlayer", "auxiliary_head.head")
    return name
def read_in_q_k_v(state_dict, config):
    for i in range(config.num_hidden_layers):
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"dpt.encoder.layer.{i}.attn.qkv.weight")
        in_proj_bias = state_dict.pop(f"dpt.encoder.layer.{i}.attn.qkv.bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"dpt.encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[: config.hidden_size, :]
        state_dict[f"dpt.encoder.layer.{i}.attention.attention.query.bias"] = in_proj_bias[: config.hidden_size]
        state_dict[f"dpt.encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[f"dpt.encoder.layer.{i}.attention.attention.key.bias"] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[f"dpt.encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[f"dpt.encoder.layer.{i}.attention.attention.value.bias"] = in_proj_bias[-config.hidden_size :]
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_dpt_checkpoint(checkpoint_url, pytorch_dump_folder_path, push_to_hub, model_name):
    """
    Copy/paste/tweak the original DPT weights into our DPT structure.
    """
    # define DPT configuration based on the URL; the helper also returns the expected output shape
    config, expected_shape = get_dpt_config(checkpoint_url)
    # load original state_dict from URL
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")
    # remove certain keys
    remove_ignore_keys_(state_dict)
    # rename keys
    for key in state_dict.copy().keys():
        val = state_dict.pop(key)
        state_dict[rename_key(key)] = val
    # read in qkv matrices
    read_in_q_k_v(state_dict, config)

    # load HuggingFace model
    model = DPTForSemanticSegmentation(config) if "ade" in checkpoint_url else DPTForDepthEstimation(config)
    model.load_state_dict(state_dict)
    model.eval()

    # Check outputs on an image
    size = 480 if "ade" in checkpoint_url else 384
    image_processor = DPTImageProcessor(size=size)

    image = prepare_img()
    encoding = image_processor(image, return_tensors="pt")

    # forward pass
    outputs = model(**encoding).logits if "ade" in checkpoint_url else model(**encoding).predicted_depth

    # Assert logits
    expected_slice = torch.tensor([[6.3199, 6.3629, 6.4148], [6.3850, 6.3615, 6.4166], [6.3519, 6.3176, 6.3575]])
    if "ade" in checkpoint_url:
        expected_slice = torch.tensor([[4.0480, 4.2420, 4.4360], [4.3124, 4.5693, 4.8261], [4.5768, 4.8965, 5.2163]])
    assert outputs.shape == torch.Size(expected_shape)
    assert (
        torch.allclose(outputs[0, 0, :3, :3], expected_slice, atol=1e-4)
        if "ade" in checkpoint_url
        else torch.allclose(outputs[0, :3, :3], expected_slice)
    )

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f"Saving model to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)

    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        print("Pushing model to hub...")
        model.push_to_hub(
            repo_path_or_name=Path(pytorch_dump_folder_path, model_name),
            organization="nielsr",
            commit_message="Add model",
            use_temp_dir=True,
        )
        image_processor.push_to_hub(
            repo_path_or_name=Path(pytorch_dump_folder_path, model_name),
            organization="nielsr",
            commit_message="Add image processor",
            use_temp_dir=True,
        )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--checkpoint_url',
default='https://github.com/intel-isl/DPT/releases/download/1_0/dpt_large-midas-2f21e586.pt',
type=str,
help='URL of the original DPT checkpoint you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path',
default=None,
type=str,
required=True,
help='Path to the output PyTorch model directory.',
)
parser.add_argument(
'--push_to_hub',
action='store_true',
)
parser.add_argument(
'--model_name',
default='dpt-large',
type=str,
help='Name of the model, in case you\'re pushing to the hub.',
)
    args = parser.parse_args()
convert_dpt_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub, args.model_name)
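
# Usage sketch (not part of the original script; the file name below is an
# assumption -- invoke whatever this file is saved as):
#
#   python convert_dpt_to_pytorch.py \
#       --checkpoint_url https://github.com/intel-isl/DPT/releases/download/1_0/dpt_large-midas-2f21e586.pt \
#       --pytorch_dump_folder_path ./dpt-large \
#       --model_name dpt-large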
| 363 | 1 |
from typing import List, Optional, Tuple
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_herbert import HerbertTokenizer
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "allegro/herbert-base-cased": "https://huggingface.co/allegro/herbert-base-cased/resolve/main/vocab.json"
    },
    "merges_file": {
        "allegro/herbert-base-cased": "https://huggingface.co/allegro/herbert-base-cased/resolve/main/merges.txt"
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {"allegro/herbert-base-cased": 514}
PRETRAINED_INIT_CONFIGURATION = {}


class HerbertTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = HerbertTokenizer

    def __init__(
        self,
        vocab_file=None,
        merges_file=None,
        tokenizer_file=None,
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        sep_token="</s>",
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            merges_file,
            tokenizer_file=tokenizer_file,
            cls_token=cls_token,
            unk_token=unk_token,
            pad_token=pad_token,
            mask_token=mask_token,
            sep_token=sep_token,
            **kwargs,
        )

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        if token_ids_1 is None:
            return cls + token_ids_0 + sep

        return cls + token_ids_0 + sep + token_ids_1 + sep

    def get_special_tokens_mask(self, token_ids_0, token_ids_1=None, already_has_special_tokens=False):
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )

        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1]

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory, filename_prefix=None):
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
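
# Minimal usage sketch (illustrative; assumes network access to the Hub):
#
#   tokenizer = HerbertTokenizerFast.from_pretrained("allegro/herbert-base-cased")
#   encoded = tokenizer("Kto czyta, nie błądzi.")
#   encoded["input_ids"]  # starts with the <s> (cls) id and ends with the </s> (sep) id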
| 248 |
import os
import shutil
from pathlib import Path
from typing import Optional, Union
import numpy as np
from huggingface_hub import hf_hub_download
from ..utils import ONNX_EXTERNAL_WEIGHTS_NAME, ONNX_WEIGHTS_NAME, is_onnx_available, logging
if is_onnx_available():
import onnxruntime as ort
logger = logging.get_logger(__name__)

ORT_TO_NP_TYPE = {
    "tensor(bool)": np.bool_,
    "tensor(int8)": np.int8,
    "tensor(uint8)": np.uint8,
    "tensor(int16)": np.int16,
    "tensor(uint16)": np.uint16,
    "tensor(int32)": np.int32,
    "tensor(uint32)": np.uint32,
    "tensor(int64)": np.int64,
    "tensor(uint64)": np.uint64,
    "tensor(float16)": np.float16,
    "tensor(float)": np.float32,
    "tensor(double)": np.float64,
}
class OnnxRuntimeModel:
    def __init__(self, model=None, **kwargs):
        logger.info("`diffusers.OnnxRuntimeModel` is experimental and might change in the future.")
        self.model = model
        self.model_save_dir = kwargs.get("model_save_dir", None)
        self.latest_model_name = kwargs.get("latest_model_name", ONNX_WEIGHTS_NAME)

    def __call__(self, **kwargs):
        inputs = {k: np.array(v) for k, v in kwargs.items()}
        return self.model.run(None, inputs)

    @staticmethod
    def load_model(path, provider=None, sess_options=None):
        """
        Loads an ONNX Inference session with a given provider (default: `CPUExecutionProvider`).
        """
        if provider is None:
            logger.info("No onnxruntime provider specified, using CPUExecutionProvider")
            provider = "CPUExecutionProvider"

        return ort.InferenceSession(path, providers=[provider], sess_options=sess_options)

    def _save_pretrained(self, save_directory, file_name=None, **kwargs):
        model_file_name = file_name if file_name is not None else ONNX_WEIGHTS_NAME
        src_path = self.model_save_dir.joinpath(self.latest_model_name)
        dst_path = Path(save_directory).joinpath(model_file_name)
        try:
            shutil.copyfile(src_path, dst_path)
        except shutil.SameFileError:
            pass

        # copy external weights (for models >2GB)
        src_path = self.model_save_dir.joinpath(ONNX_EXTERNAL_WEIGHTS_NAME)
        if src_path.exists():
            dst_path = Path(save_directory).joinpath(ONNX_EXTERNAL_WEIGHTS_NAME)
            try:
                shutil.copyfile(src_path, dst_path)
            except shutil.SameFileError:
                pass

    def save_pretrained(self, save_directory, **kwargs):
        if os.path.isfile(save_directory):
            logger.error(f"Provided path ({save_directory}) should be a directory, not a file")
            return

        os.makedirs(save_directory, exist_ok=True)

        # saving model weights/files
        self._save_pretrained(save_directory, **kwargs)

    @classmethod
    def _from_pretrained(
        cls,
        model_id,
        use_auth_token=None,
        revision=None,
        force_download=False,
        cache_dir=None,
        file_name=None,
        provider=None,
        sess_options=None,
        **kwargs,
    ):
        model_file_name = file_name if file_name is not None else ONNX_WEIGHTS_NAME
        # load model from local directory
        if os.path.isdir(model_id):
            model = OnnxRuntimeModel.load_model(
                os.path.join(model_id, model_file_name), provider=provider, sess_options=sess_options
            )
            kwargs["model_save_dir"] = Path(model_id)
        # load model from hub
        else:
            # download model
            model_cache_path = hf_hub_download(
                repo_id=model_id,
                filename=model_file_name,
                use_auth_token=use_auth_token,
                revision=revision,
                cache_dir=cache_dir,
                force_download=force_download,
            )
            kwargs["model_save_dir"] = Path(model_cache_path).parent
            kwargs["latest_model_name"] = Path(model_cache_path).name
            model = OnnxRuntimeModel.load_model(model_cache_path, provider=provider, sess_options=sess_options)
        return cls(model=model, **kwargs)

    @classmethod
    def from_pretrained(
        cls,
        model_id,
        force_download=True,
        use_auth_token=None,
        cache_dir=None,
        **model_kwargs,
    ):
        revision = None
        if len(str(model_id).split("@")) == 2:
            model_id, revision = model_id.split("@")

        return cls._from_pretrained(
            model_id=model_id,
            revision=revision,
            cache_dir=cache_dir,
            force_download=force_download,
            use_auth_token=use_auth_token,
            **model_kwargs,
        )
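
# Minimal usage sketch (illustrative; the directory, file name and graph input
# name are assumptions -- they must match your exported ONNX model):
#
#   model = OnnxRuntimeModel.from_pretrained("./my_onnx_model", file_name="model.onnx")
#   out = model(sample=np.zeros((1, 4, 64, 64), dtype=np.float32))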
| 248 | 1 |
"""simple docstring"""
import copy
import re
class TrialShortNamer:
    PREFIX = "hp"
    DEFAULTS = {}
    NAMING_INFO = None

    @classmethod
    def set_defaults(cls, prefix, defaults):
        cls.PREFIX = prefix
        cls.DEFAULTS = defaults
        cls.build_naming_info()

    @staticmethod
    def shortname_for_word(info, word):
        if len(word) == 0:
            return ""
        short_word = None
        if any(char.isdigit() for char in word):
            raise Exception(f"Parameters should not contain numbers: '{word}' contains a number")
        if word in info["short_word"]:
            return info["short_word"][word]
        for prefix_len in range(1, len(word) + 1):
            prefix = word[:prefix_len]
            if prefix in info["reverse_short_word"]:
                continue
            else:
                short_word = prefix
                break

        if short_word is None:
            # Paranoid fallback
            def int_to_alphabetic(integer):
                s = ""
                while integer != 0:
                    s = chr(ord("A") + integer % 10) + s
                    integer //= 10
                return s

            i = 0
            while True:
                sword = word + "#" + int_to_alphabetic(i)
                if sword in info["reverse_short_word"]:
                    i += 1  # try the next suffix instead of looping forever on a collision
                    continue
                else:
                    short_word = sword
                    break

        info["short_word"][word] = short_word
        info["reverse_short_word"][short_word] = word
        return short_word

    @staticmethod
    def shortname_for_key(info, param_name):
        words = param_name.split("_")

        shortname_parts = [TrialShortNamer.shortname_for_word(info, word) for word in words]

        # We try to create a separatorless short name, but if there is a collision we have to fallback
        # to a separated short name
        separators = ["", "_"]

        for separator in separators:
            shortname = separator.join(shortname_parts)
            if shortname not in info["reverse_short_param"]:
                info["short_param"][param_name] = shortname
                info["reverse_short_param"][shortname] = param_name
                return shortname

        return param_name

    @staticmethod
    def add_new_param_name(info, param_name):
        short_name = TrialShortNamer.shortname_for_key(info, param_name)
        info["short_param"][param_name] = short_name
        info["reverse_short_param"][short_name] = param_name

    @classmethod
    def build_naming_info(cls):
        if cls.NAMING_INFO is not None:
            return

        info = {
            "short_word": {},
            "reverse_short_word": {},
            "short_param": {},
            "reverse_short_param": {},
        }

        field_keys = list(cls.DEFAULTS.keys())

        for k in field_keys:
            cls.add_new_param_name(info, k)

        cls.NAMING_INFO = info

    @classmethod
    def shortname(cls, params):
        cls.build_naming_info()
        assert cls.PREFIX is not None
        name = [copy.copy(cls.PREFIX)]

        for k, v in params.items():
            if k not in cls.DEFAULTS:
                raise Exception(f"You should provide a default value for the param name {k} with value {v}")
            if v == cls.DEFAULTS[k]:
                # The default value is not added to the name
                continue

            key = cls.NAMING_INFO["short_param"][k]

            if isinstance(v, bool):
                v = 1 if v else 0

            sep = "" if isinstance(v, (int, float)) else "-"
            e = f"{key}{sep}{v}"
            name.append(e)

        return "_".join(name)

    @classmethod
    def parse_repr(cls, repr):
        repr = repr[len(cls.PREFIX) + 1 :]
        if repr == "":
            values = []
        else:
            values = repr.split("_")

        parameters = {}

        for value in values:
            if "-" in value:
                p_k, p_v = value.split("-")
            else:
                p_k = re.sub("[0-9.]", "", value)
                p_v = float(re.sub("[^0-9.]", "", value))

            key = cls.NAMING_INFO["reverse_short_param"][p_k]

            parameters[key] = p_v

        for k in cls.DEFAULTS:
            if k not in parameters:
                parameters[k] = cls.DEFAULTS[k]

        return parameters
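
# Minimal usage sketch (hypothetical subclass, not part of the original module):
#
#   class RunNamer(TrialShortNamer):
#       PREFIX = "run"
#       DEFAULTS = {"learning_rate": 1e-3, "batch_size": 32}
#
#   RunNamer.shortname({"learning_rate": 1e-3, "batch_size": 64})  # e.g. "run_bs64"
#   RunNamer.parse_repr("run_bs64")  # e.g. {"batch_size": 64.0, "learning_rate": 0.001}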
| 263 |
"""simple docstring"""
import json
import os
import shutil
import sys
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from huggingface_hub import HfFolder, delete_repo
from requests.exceptions import HTTPError
from transformers import AutoConfig, BertConfig, GPT2Config
from transformers.configuration_utils import PretrainedConfig
from transformers.testing_utils import TOKEN, USER, is_staging_test
sys.path.append(str(Path(__file__).parent.parent / '''utils'''))
from test_module.custom_configuration import CustomConfig # noqa E402
config_common_kwargs = {
'''return_dict''': False,
'''output_hidden_states''': True,
'''output_attentions''': True,
'''torchscript''': True,
'''torch_dtype''': '''float16''',
'''use_bfloat16''': True,
'''tf_legacy_loss''': True,
'''pruned_heads''': {'''a''': 1},
'''tie_word_embeddings''': False,
'''is_decoder''': True,
'''cross_attention_hidden_size''': 1_28,
'''add_cross_attention''': True,
'''tie_encoder_decoder''': True,
'''max_length''': 50,
'''min_length''': 3,
'''do_sample''': True,
'''early_stopping''': True,
'''num_beams''': 3,
'''num_beam_groups''': 3,
'''diversity_penalty''': 0.5,
'''temperature''': 2.0,
'''top_k''': 10,
'''top_p''': 0.7,
'''typical_p''': 0.2,
'''repetition_penalty''': 0.8,
'''length_penalty''': 0.8,
'''no_repeat_ngram_size''': 5,
'''encoder_no_repeat_ngram_size''': 5,
'''bad_words_ids''': [1, 2, 3],
'''num_return_sequences''': 3,
'''chunk_size_feed_forward''': 5,
'''output_scores''': True,
'''return_dict_in_generate''': True,
'''forced_bos_token_id''': 2,
'''forced_eos_token_id''': 3,
'''remove_invalid_values''': True,
'''architectures''': ['''BertModel'''],
'''finetuning_task''': '''translation''',
'''id2label''': {0: '''label'''},
'''label2id''': {'''label''': '''0'''},
'''tokenizer_class''': '''BertTokenizerFast''',
'''prefix''': '''prefix''',
'''bos_token_id''': 6,
'''pad_token_id''': 7,
'''eos_token_id''': 8,
'''sep_token_id''': 9,
'''decoder_start_token_id''': 10,
'''exponential_decay_length_penalty''': (5, 1.0_1),
'''suppress_tokens''': [0, 1],
'''begin_suppress_tokens''': 2,
'''task_specific_params''': {'''translation''': '''some_params'''},
'''problem_type''': '''regression''',
}
@is_staging_test
class ConfigPushToHubTester(unittest.TestCase):
    @classmethod
    def setUpClass(cls):
        cls._token = TOKEN
        HfFolder.save_token(TOKEN)

    @classmethod
    def tearDownClass(cls):
        try:
            delete_repo(token=cls._token, repo_id="test-config")
        except HTTPError:
            pass

        try:
            delete_repo(token=cls._token, repo_id="valid_org/test-config-org")
        except HTTPError:
            pass

        try:
            delete_repo(token=cls._token, repo_id="test-dynamic-config")
        except HTTPError:
            pass

    def test_push_to_hub(self):
        config = BertConfig(
            vocab_size=99, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37
        )
        config.push_to_hub("test-config", use_auth_token=self._token)

        new_config = BertConfig.from_pretrained(f"{USER}/test-config")
        for k, v in config.to_dict().items():
            if k != "transformers_version":
                self.assertEqual(v, getattr(new_config, k))

        # Reset repo
        delete_repo(token=self._token, repo_id="test-config")

        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            config.save_pretrained(tmp_dir, repo_id="test-config", push_to_hub=True, use_auth_token=self._token)

        new_config = BertConfig.from_pretrained(f"{USER}/test-config")
        for k, v in config.to_dict().items():
            if k != "transformers_version":
                self.assertEqual(v, getattr(new_config, k))
    def test_push_to_hub_in_organization(self):
        config = BertConfig(
            vocab_size=99, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37
        )
        config.push_to_hub("valid_org/test-config-org", use_auth_token=self._token)

        new_config = BertConfig.from_pretrained("valid_org/test-config-org")
        for k, v in config.to_dict().items():
            if k != "transformers_version":
                self.assertEqual(v, getattr(new_config, k))

        # Reset repo
        delete_repo(token=self._token, repo_id="valid_org/test-config-org")

        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            config.save_pretrained(
                tmp_dir, repo_id="valid_org/test-config-org", push_to_hub=True, use_auth_token=self._token
            )

        new_config = BertConfig.from_pretrained("valid_org/test-config-org")
        for k, v in config.to_dict().items():
            if k != "transformers_version":
                self.assertEqual(v, getattr(new_config, k))
    def test_push_to_hub_dynamic_config(self):
        CustomConfig.register_for_auto_class()
        config = CustomConfig(attribute=42)

        config.push_to_hub("test-dynamic-config", use_auth_token=self._token)

        # This has added the proper auto_map field to the config
        self.assertDictEqual(config.auto_map, {"AutoConfig": "custom_configuration.CustomConfig"})

        new_config = AutoConfig.from_pretrained(f"{USER}/test-dynamic-config", trust_remote_code=True)
        # Can't make an isinstance check because the new_config is from the FakeConfig class of a dynamic module
        self.assertEqual(new_config.__class__.__name__, "CustomConfig")
        self.assertEqual(new_config.attribute, 42)
class ConfigTestUtils(unittest.TestCase):
    def test_config_from_string(self):
        c = GPT2Config()

        # attempt to modify each of int/float/bool/str config records and verify they were updated
        n_embd = c.n_embd + 1  # int
        resid_pdrop = c.resid_pdrop + 1.0  # float
        scale_attn_weights = not c.scale_attn_weights  # bool
        summary_type = c.summary_type + "foo"  # str
        c.update_from_string(
            f"n_embd={n_embd},resid_pdrop={resid_pdrop},scale_attn_weights={scale_attn_weights},summary_type={summary_type}"
        )
        self.assertEqual(n_embd, c.n_embd, "mismatch for key: n_embd")
        self.assertEqual(resid_pdrop, c.resid_pdrop, "mismatch for key: resid_pdrop")
        self.assertEqual(scale_attn_weights, c.scale_attn_weights, "mismatch for key: scale_attn_weights")
        self.assertEqual(summary_type, c.summary_type, "mismatch for key: summary_type")
    def test_config_common_kwargs_is_complete(self):
        base_config = PretrainedConfig()
        missing_keys = [key for key in base_config.__dict__ if key not in config_common_kwargs]
        # If this part of the test fails, you have arguments to add in config_common_kwargs above.
        self.assertListEqual(
            missing_keys, ["is_encoder_decoder", "_name_or_path", "_commit_hash", "transformers_version"]
        )
        keys_with_defaults = [key for key, value in config_common_kwargs.items() if value == getattr(base_config, key)]
        if len(keys_with_defaults) > 0:
            raise ValueError(
                "The following keys are set with the default values in"
                " `test_configuration_common.config_common_kwargs` pick another value for them:"
                f" {', '.join(keys_with_defaults)}."
            )
    def test_from_pretrained_subfolder(self):
        with self.assertRaises(OSError):
            # config is in subfolder, the following should not work without specifying the subfolder
            _ = BertConfig.from_pretrained("hf-internal-testing/tiny-random-bert-subfolder")

        config = BertConfig.from_pretrained("hf-internal-testing/tiny-random-bert-subfolder", subfolder="bert")

        self.assertIsNotNone(config)
    def test_cached_files_are_used_when_internet_is_down(self):
        # A mock response for an HTTP head request to emulate server down
        response_mock = mock.Mock()
        response_mock.status_code = 500
        response_mock.headers = {}
        response_mock.raise_for_status.side_effect = HTTPError
        response_mock.json.return_value = {}

        # Download this model to make sure it's in the cache.
        _ = BertConfig.from_pretrained("hf-internal-testing/tiny-random-bert")

        # Under the mock environment we get a 500 error when trying to reach the model.
        with mock.patch("requests.Session.request", return_value=response_mock) as mock_head:
            _ = BertConfig.from_pretrained("hf-internal-testing/tiny-random-bert")
            # This check we did call the fake head request
            mock_head.assert_called()
    def test_legacy_load_from_url(self):
        # This test is for deprecated behavior and can be removed in v5
        _ = BertConfig.from_pretrained(
            "https://huggingface.co/hf-internal-testing/tiny-random-bert/resolve/main/config.json"
        )
    def test_local_versioning(self):
        configuration = AutoConfig.from_pretrained("bert-base-cased")
        configuration.configuration_files = ["config.4.0.0.json"]

        with tempfile.TemporaryDirectory() as tmp_dir:
            configuration.save_pretrained(tmp_dir)
            configuration.hidden_size = 2
            json.dump(configuration.to_dict(), open(os.path.join(tmp_dir, "config.4.0.0.json"), "w"))

            # This should pick the new configuration file as the version of Transformers is > 4.0.0
            new_configuration = AutoConfig.from_pretrained(tmp_dir)
            self.assertEqual(new_configuration.hidden_size, 2)

            # Will need to be adjusted if we reach v42 and this test is still here.
            # Should pick the old configuration file as the version of Transformers is < 4.42.0
            configuration.configuration_files = ["config.42.0.0.json"]
            configuration.hidden_size = 768
            configuration.save_pretrained(tmp_dir)
            shutil.move(os.path.join(tmp_dir, "config.4.0.0.json"), os.path.join(tmp_dir, "config.42.0.0.json"))
            new_configuration = AutoConfig.from_pretrained(tmp_dir)
            self.assertEqual(new_configuration.hidden_size, 768)
    def test_repo_versioning_before(self):
        # This repo has two configuration files, one for v4.0.0 and above with a different hidden size.
        repo = "hf-internal-testing/test-two-configs"

        import transformers as new_transformers

        new_transformers.configuration_utils.__version__ = "v4.0.0"
        new_configuration, kwargs = new_transformers.models.auto.AutoConfig.from_pretrained(
            repo, return_unused_kwargs=True
        )
        self.assertEqual(new_configuration.hidden_size, 2)
        # This checks `_configuration_file` ia not kept in the kwargs by mistake.
        self.assertDictEqual(kwargs, {})

        # Testing an older version by monkey-patching the version in the module it's used.
        import transformers as old_transformers

        old_transformers.configuration_utils.__version__ = "v3.0.0"
        old_configuration = old_transformers.models.auto.AutoConfig.from_pretrained(repo)
        self.assertEqual(old_configuration.hidden_size, 768)
| 263 | 1 |
"""simple docstring"""
import functools
def min_distance_up_bottom(word1: str, word2: str) -> int:
    """
    Minimum number of single-character edits (insert, delete, replace) needed to
    turn `word1` into `word2` (Levenshtein distance), computed top-down with memoization.
    """
    len_word1 = len(word1)
    len_word2 = len(word2)

    @functools.cache
    def min_distance(index1: int, index2: int) -> int:
        # if first word index overflows - delete all remaining of the second word
        if index1 >= len_word1:
            return len_word2 - index2
        # if second word index overflows - delete all remaining of the first word
        if index2 >= len_word2:
            return len_word1 - index1
        diff = int(word1[index1] != word2[index2])  # current letters not identical
        return min(
            1 + min_distance(index1 + 1, index2),
            1 + min_distance(index1, index2 + 1),
            diff + min_distance(index1 + 1, index2 + 1),
        )

    return min_distance(0, 0)
if __name__ == "__main__":
import doctest
doctest.testmod()
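
# Examples (illustrative):
#
#   min_distance_up_bottom("intention", "execution")  # -> 5
#   min_distance_up_bottom("", "abc")                 # -> 3 (three insertions)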
| 657 |
"""simple docstring"""
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
from accelerate.test_utils import execute_subprocess_async
def test_command_parser(subparsers=None):
    if subparsers is not None:
        parser = subparsers.add_parser("test")
    else:
        parser = argparse.ArgumentParser("Accelerate test command")

    parser.add_argument(
        "--config_file",
        default=None,
        help=(
            "The path to use to store the config file. Will default to a file named default_config.yaml in the cache "
            "location, which is the content of the environment `HF_HOME` suffixed with 'accelerate', or if you don't have "
            "such an environment variable, your cache directory ('~/.cache' or the content of `XDG_CACHE_HOME`) suffixed "
            "with 'huggingface'."
        ),
    )

    if subparsers is not None:
        parser.set_defaults(func=test_command)
    return parser


def test_command(args):
    script_name = os.path.sep.join(__file__.split(os.path.sep)[:-2] + ["test_utils", "scripts", "test_script.py"])

    if args.config_file is None:
        test_args = script_name
    else:
        test_args = f"--config_file={args.config_file} {script_name}"

    cmd = ["accelerate-launch"] + test_args.split()
    result = execute_subprocess_async(cmd, env=os.environ.copy())
    if result.returncode == 0:
        print("Test is a success! You are ready for your distributed training!")


def main():
    parser = test_command_parser()
    args = parser.parse_args()
    test_command(args)
if __name__ == "__main__":
main()
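
# Typical invocations through the `accelerate` CLI (illustrative):
#
#   accelerate test
#   accelerate test --config_file path/to/default_config.yaml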
| 657 | 1 |
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

LEVIT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/levit-128S": "https://huggingface.co/facebook/levit-128S/resolve/main/config.json",
    # See all LeViT models at https://huggingface.co/models?filter=levit
}


class LevitConfig(PretrainedConfig):
    model_type = "levit"

    def __init__(
        self,
        image_size=224,
        num_channels=3,
        kernel_size=3,
        stride=2,
        padding=1,
        patch_size=16,
        hidden_sizes=[128, 256, 384],
        num_attention_heads=[4, 8, 12],
        depths=[4, 4, 4],
        key_dim=[16, 16, 16],
        drop_path_rate=0,
        mlp_ratio=[2, 2, 2],
        attention_ratio=[2, 2, 2],
        initializer_range=0.02,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.image_size = image_size
        self.num_channels = num_channels
        self.kernel_size = kernel_size
        self.stride = stride
        self.padding = padding
        self.hidden_sizes = hidden_sizes
        self.num_attention_heads = num_attention_heads
        self.depths = depths
        self.key_dim = key_dim
        self.drop_path_rate = drop_path_rate
        self.patch_size = patch_size
        self.attention_ratio = attention_ratio
        self.mlp_ratio = mlp_ratio
        self.initializer_range = initializer_range
        self.down_ops = [
            ["Subsample", key_dim[0], hidden_sizes[0] // key_dim[0], 4, 2, 2],
            ["Subsample", key_dim[0], hidden_sizes[1] // key_dim[0], 4, 2, 2],
        ]


# ONNX export configuration for LeViT
class LevitOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-4
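
# Minimal usage sketch (illustrative; assumes OnnxConfig's standard constructor):
#
#   config = LevitConfig()          # defaults as defined above
#   config.hidden_sizes             # [128, 256, 384]
#   onnx_config = LevitOnnxConfig(config)
#   list(onnx_config.inputs)        # ["pixel_values"]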
| 687 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
'configuration_trajectory_transformer': [
'TRAJECTORY_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP',
'TrajectoryTransformerConfig',
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_trajectory_transformer"] = [
'TRAJECTORY_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
'TrajectoryTransformerModel',
'TrajectoryTransformerPreTrainedModel',
'load_tf_weights_in_trajectory_transformer',
]
if TYPE_CHECKING:
from .configuration_trajectory_transformer import (
TRAJECTORY_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
TrajectoryTransformerConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_trajectory_transformer import (
TRAJECTORY_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TrajectoryTransformerModel,
TrajectoryTransformerPreTrainedModel,
load_tf_weights_in_trajectory_transformer,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 687 | 1 |
import math
def malus_law(initial_intensity: float, angle: float) -> float:
    # handling of negative values of initial intensity
    if initial_intensity < 0:
        raise ValueError("The value of intensity cannot be negative")
    # handling of values out of the allowed range
    if angle < 0 or angle > 360:
        raise ValueError("In Malus Law, the angle is in the range 0-360 degrees")
    return initial_intensity * (math.cos(math.radians(angle)) ** 2)
if __name__ == "__main__":
import doctest
doctest.testmod(name='malus_law')
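
# Example (illustrative): a polarizer at 60 degrees passes cos^2(60°) = 25% of the light.
#
#   malus_law(100.0, 60.0)  # ≈ 25.0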
| 253 |
# NOTE: This file is deprecated and will be removed in a future version.
# It only exists so that temporarely `from diffusers.pipelines import DiffusionPipeline` works
from ...utils import deprecate
from ..controlnet.multicontrolnet import MultiControlNetModel # noqa: F401
from ..controlnet.pipeline_controlnet import StableDiffusionControlNetPipeline # noqa: F401
deprecate(
'stable diffusion controlnet',
'0.22.0',
'Importing `StableDiffusionControlNetPipeline` or `MultiControlNetModel` from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_controlnet is deprecated. Please import `from diffusers import StableDiffusionControlNetPipeline` instead.',
standard_warn=False,
stacklevel=3,
)
| 253 | 1 |
"""simple docstring"""
import numpy as np
def power_iteration(
    input_matrix: np.ndarray,
    vector: np.ndarray,
    error_tol: float = 1e-12,
    max_iterations: int = 100,
):
    """
    Power iteration: find the largest eigenvalue (and its eigenvector) of `input_matrix`,
    starting from the given initial `vector`.
    """
    # Ensure the matrix is square.
    assert np.shape(input_matrix)[0] == np.shape(input_matrix)[1]
    # Ensure proper dimensionality.
    assert np.shape(input_matrix)[0] == np.shape(vector)[0]
    # Ensure inputs are either both complex or both real
    assert np.iscomplexobj(input_matrix) == np.iscomplexobj(vector)
    is_complex = np.iscomplexobj(input_matrix)
    if is_complex:
        # Ensure complex input_matrix is Hermitian
        assert np.array_equal(input_matrix, input_matrix.conj().T)

    # Set convergence to False. Will define convergence when we exceed max_iterations
    # or when we have small changes from one iteration to next.
    convergence = False
    lambda_previous = 0
    iterations = 0
    error = 1e12

    while not convergence:
        # Multiply matrix by the vector.
        w = np.dot(input_matrix, vector)
        # Normalize the resulting output vector.
        vector = w / np.linalg.norm(w)
        # Find rayleigh quotient
        # (faster than usual b/c we know vector is normalized already)
        vector_h = vector.conj().T if is_complex else vector.T
        lambda_ = np.dot(vector_h, np.dot(input_matrix, vector))

        # Check convergence.
        error = np.abs(lambda_ - lambda_previous) / lambda_
        iterations += 1

        if error <= error_tol or iterations >= max_iterations:
            convergence = True

        lambda_previous = lambda_

    if is_complex:
        lambda_ = np.real(lambda_)

    return lambda_, vector


def test_power_iteration():
    real_input_matrix = np.array([[41, 4, 20], [4, 26, 30], [20, 30, 50]])
    real_vector = np.array([41, 4, 20])
    complex_input_matrix = real_input_matrix.astype(np.complex128)
    imag_matrix = np.triu(1j * complex_input_matrix, 1)
    complex_input_matrix += imag_matrix
    complex_input_matrix += -1 * imag_matrix.T
    complex_vector = np.array([41, 4, 20]).astype(np.complex128)

    for problem_type in ["real", "complex"]:
        if problem_type == "real":
            input_matrix = real_input_matrix
            vector = real_vector
        elif problem_type == "complex":
            input_matrix = complex_input_matrix
            vector = complex_vector

        # Our implementation.
        eigen_value, eigen_vector = power_iteration(input_matrix, vector)

        # Numpy implementation.
        # Get eigenvalues and eigenvectors using built-in numpy
        # eigh (eigh used for symmetric or hermetian matrices).
        eigen_values, eigen_vectors = np.linalg.eigh(input_matrix)
        # Last eigenvalue is the maximum one.
        eigen_value_max = eigen_values[-1]
        # Last column in this matrix is eigenvector corresponding to largest eigenvalue.
        eigen_vector_max = eigen_vectors[:, -1]

        # Check our implementation and numpy gives close answers.
        assert np.abs(eigen_value - eigen_value_max) <= 1e-6
        # Take absolute values element wise of each eigenvector,
        # as they are only unique to a minus sign.
        assert np.linalg.norm(np.abs(eigen_vector) - np.abs(eigen_vector_max)) <= 1e-6
if __name__ == "__main__":
import doctest
doctest.testmod()
test_power_iteration()
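
# Example (illustrative): dominant eigenvalue of a small symmetric matrix.
#
#   a = np.array([[2.0, 1.0], [1.0, 2.0]])
#   v = np.array([1.0, 0.0])
#   eigen_value, eigen_vector = power_iteration(a, v)  # eigen_value ≈ 3.0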
| 637 |
"""simple docstring"""
import json
import os
from functools import lru_cache
from typing import Dict, List, Optional, Tuple, Union
import regex as re
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...tokenization_utils_base import BatchEncoding, EncodedInput
from ...utils import PaddingStrategy, logging
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt"}
# See all LED models at https://huggingface.co/models?filter=LED
PRETRAINED_VOCAB_FILES_MAP = {
'''vocab_file''': {
'''allenai/led-base-16384''': '''https://huggingface.co/allenai/led-base-16384/resolve/main/vocab.json''',
},
'''merges_file''': {
'''allenai/led-base-16384''': '''https://huggingface.co/allenai/led-base-16384/resolve/main/merges.txt''',
},
'''tokenizer_file''': {
'''allenai/led-base-16384''': '''https://huggingface.co/allenai/led-base-16384/resolve/main/tokenizer.json''',
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'''allenai/led-base-16384''': 16_384,
}
@lru_cache()
# Copied from transformers.models.bart.tokenization_bart.bytes_to_unicode
def bytes_to_unicode():
    """
    Returns a list of utf-8 bytes and a mapping to unicode strings, avoiding
    whitespace/control characters the bpe code barfs on.
    """
    bs = (
        list(range(ord("!"), ord("~") + 1)) + list(range(ord("¡"), ord("¬") + 1)) + list(range(ord("®"), ord("ÿ") + 1))
    )
    cs = bs[:]
    n = 0
    for b in range(2**8):
        if b not in bs:
            bs.append(b)
            cs.append(2**8 + n)
            n += 1
    cs = [chr(n) for n in cs]
    return dict(zip(bs, cs))


def get_pairs(word):
    """
    Return the set of adjacent symbol pairs in a word (the word is a tuple of symbols).
    """
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char))
        prev_char = char
    return pairs


class LEDTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    def __init__(
        self,
        vocab_file,
        merges_file,
        errors="replace",
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        add_prefix_space=False,
        **kwargs,
    ):
        bos_token = AddedToken(bos_token, lstrip=False, rstrip=False) if isinstance(bos_token, str) else bos_token
        eos_token = AddedToken(eos_token, lstrip=False, rstrip=False) if isinstance(eos_token, str) else eos_token
        sep_token = AddedToken(sep_token, lstrip=False, rstrip=False) if isinstance(sep_token, str) else sep_token
        cls_token = AddedToken(cls_token, lstrip=False, rstrip=False) if isinstance(cls_token, str) else cls_token
        unk_token = AddedToken(unk_token, lstrip=False, rstrip=False) if isinstance(unk_token, str) else unk_token
        pad_token = AddedToken(pad_token, lstrip=False, rstrip=False) if isinstance(pad_token, str) else pad_token

        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        super().__init__(
            errors=errors,
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            cls_token=cls_token,
            pad_token=pad_token,
            mask_token=mask_token,
            add_prefix_space=add_prefix_space,
            **kwargs,
        )

        with open(vocab_file, encoding="utf-8") as vocab_handle:
            self.encoder = json.load(vocab_handle)
        self.decoder = {v: k for k, v in self.encoder.items()}
        self.errors = errors  # how to handle errors in decoding
        self.byte_encoder = bytes_to_unicode()
        self.byte_decoder = {v: k for k, v in self.byte_encoder.items()}
        with open(merges_file, encoding="utf-8") as merges_handle:
            bpe_merges = merges_handle.read().split("\n")[1:-1]
        bpe_merges = [tuple(merge.split()) for merge in bpe_merges]
        self.bpe_ranks = dict(zip(bpe_merges, range(len(bpe_merges))))
        self.cache = {}
        self.add_prefix_space = add_prefix_space

        # Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
        self.pat = re.compile(r"""'s|'t|'re|'ve|'m|'ll|'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+""")
    @property
    # Copied from transformers.models.bart.tokenization_bart.BartTokenizer.vocab_size
    def vocab_size(self):
        return len(self.encoder)

    def get_vocab(self):
        return dict(self.encoder, **self.added_tokens_encoder)
    def bpe(self, token):
        if token in self.cache:
            return self.cache[token]
        word = tuple(token)
        pairs = get_pairs(word)

        if not pairs:
            return token

        while True:
            bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float("inf")))
            if bigram not in self.bpe_ranks:
                break
            first, second = bigram
            new_word = []
            i = 0
            while i < len(word):
                try:
                    j = word.index(first, i)
                except ValueError:
                    new_word.extend(word[i:])
                    break
                else:
                    new_word.extend(word[i:j])
                    i = j

                if word[i] == first and i < len(word) - 1 and word[i + 1] == second:
                    new_word.append(first + second)
                    i += 2
                else:
                    new_word.append(word[i])
                    i += 1
            new_word = tuple(new_word)
            word = new_word
            if len(word) == 1:
                break
            else:
                pairs = get_pairs(word)
        word = " ".join(word)
        self.cache[token] = word
        return word
    def _tokenize(self, text):
        """Tokenize a string."""
        bpe_tokens = []
        for token in re.findall(self.pat, text):
            token = "".join(
                self.byte_encoder[b] for b in token.encode("utf-8")
            )  # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
            bpe_tokens.extend(bpe_token for bpe_token in self.bpe(token).split(" "))
        return bpe_tokens

    def _convert_token_to_id(self, token):
        """Converts a token (str) into an id using the vocab."""
        return self.encoder.get(token, self.encoder.get(self.unk_token))

    def _convert_id_to_token(self, index):
        """Converts an index (integer) into a token (str) using the vocab."""
        return self.decoder.get(index)

    def convert_tokens_to_string(self, tokens):
        """Converts a sequence of tokens (string) into a single string."""
        text = "".join(tokens)
        text = bytearray([self.byte_decoder[c] for c in text]).decode("utf-8", errors=self.errors)
        return text
    def save_vocabulary(self, save_directory, filename_prefix=None):
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        merge_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"]
        )

        with open(vocab_file, "w", encoding="utf-8") as f:
            f.write(json.dumps(self.encoder, indent=2, sort_keys=True, ensure_ascii=False) + "\n")

        index = 0
        with open(merge_file, "w", encoding="utf-8") as writer:
            writer.write("#version: 0.2\n")
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items(), key=lambda kv: kv[1]):
                if index != token_index:
                    logger.warning(
                        f"Saving vocabulary to {merge_file}: BPE merge indices are not consecutive."
                        " Please check that the tokenizer is not corrupted!"
                    )
                    index = token_index
                writer.write(" ".join(bpe_tokens) + "\n")
                index += 1

        return vocab_file, merge_file
    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + sep + token_ids_1 + sep

    def get_special_tokens_mask(self, token_ids_0, token_ids_1=None, already_has_special_tokens=False):
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )

        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1)) + [1]

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    def prepare_for_tokenization(self, text, is_split_into_words=False, **kwargs):
        add_prefix_space = kwargs.pop("add_prefix_space", self.add_prefix_space)
        if (is_split_into_words or add_prefix_space) and (len(text) > 0 and not text[0].isspace()):
            text = " " + text
        return (text, kwargs)
    def _pad(
        self,
        encoded_inputs,
        max_length=None,
        padding_strategy=PaddingStrategy.DO_NOT_PAD,
        pad_to_multiple_of=None,
        return_attention_mask=None,
    ):
        encoded_inputs = super()._pad(
            encoded_inputs=encoded_inputs,
            max_length=max_length,
            padding_strategy=padding_strategy,
            pad_to_multiple_of=pad_to_multiple_of,
            return_attention_mask=return_attention_mask,
        )

        # Load from model defaults
        if return_attention_mask is None:
            return_attention_mask = "attention_mask" in self.model_input_names

        if return_attention_mask and "global_attention_mask" in encoded_inputs:
            required_input = encoded_inputs[self.model_input_names[0]]
            # `global_attention_mask` need to have the same length as other (sequential) inputs.
            needs_to_be_padded = len(encoded_inputs["global_attention_mask"]) != len(required_input)

            if needs_to_be_padded:
                difference = len(required_input) - len(encoded_inputs["global_attention_mask"])

                if self.padding_side == "right":
                    # Use `-1` since `0` in `global_attention_mask` means `local attention` instead of `not to attend`
                    encoded_inputs["global_attention_mask"] = (
                        encoded_inputs["global_attention_mask"] + [-1] * difference
                    )
                elif self.padding_side == "left":
                    encoded_inputs["global_attention_mask"] = [-1] * difference + encoded_inputs[
                        "global_attention_mask"
                    ]
                else:
                    raise ValueError("Invalid padding strategy:" + str(self.padding_side))

        return encoded_inputs
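
# Usage sketch (illustrative; requires network access to download the vocabulary):
#
#   tokenizer = LEDTokenizer.from_pretrained("allenai/led-base-16384")
#   enc = tokenizer("A very long document ...", return_tensors="pt")
#   enc["input_ids"], enc["attention_mask"]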
| 637 | 1 |
'''simple docstring'''
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class BridgeTowerProcessor(ProcessorMixin):
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "BridgeTowerImageProcessor"
    tokenizer_class = ("RobertaTokenizer", "RobertaTokenizerFast")

    def __init__(self, image_processor, tokenizer):
        super().__init__(image_processor, tokenizer)

    def __call__(
        self, images, text=None, add_special_tokens=True, padding=False, truncation=None, max_length=None,
        stride=0, pad_to_multiple_of=None, return_token_type_ids=None, return_attention_mask=None,
        return_overflowing_tokens=False, return_special_tokens_mask=False, return_offsets_mapping=False,
        return_length=False, verbose=True, return_tensors=None, **kwargs,
    ):
        encoding = self.tokenizer(
            text=text, add_special_tokens=add_special_tokens, padding=padding, truncation=truncation,
            max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of,
            return_token_type_ids=return_token_type_ids, return_attention_mask=return_attention_mask,
            return_overflowing_tokens=return_overflowing_tokens,
            return_special_tokens_mask=return_special_tokens_mask,
            return_offsets_mapping=return_offsets_mapping, return_length=return_length,
            verbose=verbose, return_tensors=return_tensors, **kwargs,
        )
        # add pixel_values + pixel_mask
        encoding_image_processor = self.image_processor(
            images, return_tensors=return_tensors, do_normalize=True, do_center_crop=True, **kwargs
        )
        encoding.update(encoding_image_processor)

        return encoding

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))
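
# Minimal usage sketch (illustrative; the checkpoint name is an assumption):
#
#   processor = BridgeTowerProcessor.from_pretrained("BridgeTower/bridgetower-base")
#   inputs = processor(images=image, text="a photo of a cat", return_tensors="pt")
#   inputs.keys()  # input_ids, attention_mask, pixel_values, pixel_mask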
| 320 |
'''simple docstring'''
import argparse
from collections import defaultdict
def overwrite_file(file, class_name, test_name, correct_line, done_test):
    _id = f"{file}_{class_name}_{test_name}"
    done_test[_id] += 1

    with open(file, "r") as f:
        lines = f.readlines()

    class_regex = f"class {class_name}("
    func_regex = f"{4 * ' '}def {test_name}("
    line_begin_regex = f"{8 * ' '}{correct_line.split()[0]}"
    another_line_begin_regex = f"{16 * ' '}{correct_line.split()[0]}"
    in_class = False
    in_func = False
    in_line = False
    insert_line = False
    count = 0
    spaces = 0
    new_lines = []
    for line in lines:
        if line.startswith(class_regex):
            in_class = True
        elif in_class and line.startswith(func_regex):
            in_func = True
        elif in_class and in_func and (line.startswith(line_begin_regex) or line.startswith(another_line_begin_regex)):
            spaces = len(line.split(correct_line.split()[0])[0])
            count += 1

            if count == done_test[_id]:
                in_line = True

        if in_class and in_func and in_line:
            if ")" not in line:
                continue
            else:
                insert_line = True

        if in_class and in_func and in_line and insert_line:
            new_lines.append(f"{spaces * ' '}{correct_line}")
            in_class = in_func = in_line = insert_line = False
        else:
            new_lines.append(line)

    with open(file, "w") as f:
        for line in new_lines:
            f.write(line)


def main(correct, fail=None):
    if fail is not None:
        with open(fail, "r") as f:
            test_failures = {l.strip() for l in f.readlines()}
    else:
        test_failures = None

    with open(correct, "r") as f:
        correct_lines = f.readlines()

    done_tests = defaultdict(int)
    for line in correct_lines:
        file, class_name, test_name, correct_line = line.split(";")
        if test_failures is None or "::".join([file, class_name, test_name]) in test_failures:
            overwrite_file(file, class_name, test_name, correct_line, done_tests)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument('--correct_filename', help='filename of tests with expected result')
parser.add_argument('--fail_filename', help='filename of test failures', type=str, default=None)
    args = parser.parse_args()
main(args.correct_filename, args.fail_filename)
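
# Example invocation (illustrative; the script file name is an assumption). Each line of
# the correct file is expected to look like:
#   path/to/test_file.py;TestClass;test_name;corrected source line
#
#   python overwrite_expected_slice.py --correct_filename fixes.txt --fail_filename failures.txt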
| 320 | 1 |
from typing import Optional, Tuple, Union
import tensorflow as tf
from ...activations_tf import ACT2FN
from ...file_utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward
from ...modeling_tf_outputs import (
TFBaseModelOutputWithNoAttention,
TFBaseModelOutputWithPoolingAndNoAttention,
TFSequenceClassifierOutput,
)
from ...modeling_tf_utils import TFPreTrainedModel, TFSequenceClassificationLoss, keras_serializable, unpack_inputs
from ...tf_utils import shape_list
from ...utils import logging
from .configuration_regnet import RegNetConfig
logger = logging.get_logger(__name__)

# General docstring
_CONFIG_FOR_DOC = "RegNetConfig"

# Base docstring
_CHECKPOINT_FOR_DOC = "facebook/regnet-y-040"
_EXPECTED_OUTPUT_SHAPE = [1, 1088, 7, 7]

# Image classification docstring
_IMAGE_CLASS_CHECKPOINT = "facebook/regnet-y-040"
_IMAGE_CLASS_EXPECTED_OUTPUT = "tabby, tabby cat"

TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST = [
    "facebook/regnet-y-040",
    # See all regnet models at https://huggingface.co/models?filter=regnet
]
class TFRegNetConvLayer(tf.keras.layers.Layer):
    def __init__(
        self,
        out_channels: int,
        kernel_size: int = 3,
        stride: int = 1,
        groups: int = 1,
        activation: Optional[str] = "relu",
        **kwargs,
    ):
        super().__init__(**kwargs)
        # The padding and conv has been verified in
        # https://colab.research.google.com/gist/sayakpaul/854bc10eeaf21c9ee2119e0b9f3841a7/scratchpad.ipynb
        self.padding = tf.keras.layers.ZeroPadding2D(padding=kernel_size // 2)
        self.convolution = tf.keras.layers.Conv2D(
            filters=out_channels,
            kernel_size=kernel_size,
            strides=stride,
            padding="VALID",
            groups=groups,
            use_bias=False,
            name="convolution",
        )
        self.normalization = tf.keras.layers.BatchNormalization(epsilon=1e-5, momentum=0.9, name="normalization")
        self.activation = ACT2FN[activation] if activation is not None else tf.identity

    def call(self, hidden_state):
        hidden_state = self.convolution(self.padding(hidden_state))
        hidden_state = self.normalization(hidden_state)
        hidden_state = self.activation(hidden_state)
        return hidden_state


class TFRegNetEmbeddings(tf.keras.layers.Layer):
    """
    RegNet embeddings (stem), composed of a single aggressive convolution.
    """

    def __init__(self, config: RegNetConfig, **kwargs):
        super().__init__(**kwargs)
        self.num_channels = config.num_channels
        self.embedder = TFRegNetConvLayer(
            out_channels=config.embedding_size,
            kernel_size=3,
            stride=2,
            activation=config.hidden_act,
            name="embedder",
        )

    def call(self, pixel_values):
        num_channels = shape_list(pixel_values)[1]
        if tf.executing_eagerly() and num_channels != self.num_channels:
            raise ValueError(
                "Make sure that the channel dimension of the pixel values match with the one set in the configuration."
            )

        # When running on CPU, `tf.keras.layers.Conv2D` doesn't support `NCHW` format.
        # So change the input format from `NCHW` to `NHWC`.
        # shape = (batch_size, in_height, in_width, in_channels=num_channels)
        pixel_values = tf.transpose(pixel_values, perm=(0, 2, 3, 1))
        hidden_state = self.embedder(pixel_values)
        return hidden_state
class _lowerCAmelCase ( tf.keras.layers.Layer ):
def __init__( self , _UpperCamelCase , _UpperCamelCase = 2 , **_UpperCamelCase ) -> Dict:
super().__init__(**A_ )
lowerCAmelCase_ = tf.keras.layers.ConvaD(
filters=A_ , kernel_size=1 , strides=A_ , use_bias=A_ , name="convolution" )
lowerCAmelCase_ = tf.keras.layers.BatchNormalization(epsilon=1e-5 , momentum=0.9 , name="normalization" )
def __a ( self , _UpperCamelCase , _UpperCamelCase = False ) -> tf.Tensor:
return self.normalization(self.convolution(A_ ) , training=A_ )
class _lowerCAmelCase ( tf.keras.layers.Layer ):
def __init__( self , _UpperCamelCase , _UpperCamelCase , **_UpperCamelCase ) -> Any:
super().__init__(**A_ )
lowerCAmelCase_ = tf.keras.layers.GlobalAveragePoolingaD(keepdims=A_ , name="pooler" )
lowerCAmelCase_ = [
tf.keras.layers.ConvaD(filters=A_ , kernel_size=1 , activation="relu" , name="attention.0" ),
tf.keras.layers.ConvaD(filters=A_ , kernel_size=1 , activation="sigmoid" , name="attention.2" ),
]
    def call(self, hidden_state):
lowerCAmelCase_ = self.pooler(A_ )
for layer_module in self.attention:
lowerCAmelCase_ = layer_module(A_ )
lowerCAmelCase_ = hidden_state * pooled
return hidden_state
class TFRegNetXLayer(tf.keras.layers.Layer):
def __init__( self , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase = 1 , **_UpperCamelCase ) -> Dict:
super().__init__(**A_ )
lowerCAmelCase_ = in_channels != out_channels or stride != 1
lowerCAmelCase_ = max(1 , out_channels // config.groups_width )
lowerCAmelCase_ = (
TFRegNetShortCut(A_ , stride=A_ , name="shortcut" )
if should_apply_shortcut
else tf.keras.layers.Activation("linear" , name="shortcut" )
)
# `self.layers` instead of `self.layer` because that is a reserved argument.
lowerCAmelCase_ = [
TFRegNetConvLayer(A_ , kernel_size=1 , activation=config.hidden_act , name="layer.0" ),
TFRegNetConvLayer(
A_ , stride=A_ , groups=A_ , activation=config.hidden_act , name="layer.1" ),
TFRegNetConvLayer(A_ , kernel_size=1 , activation=A_ , name="layer.2" ),
]
lowerCAmelCase_ = ACTaFN[config.hidden_act]
    def call(self, hidden_state):
lowerCAmelCase_ = hidden_state
for layer_module in self.layers:
lowerCAmelCase_ = layer_module(A_ )
lowerCAmelCase_ = self.shortcut(A_ )
hidden_state += residual
lowerCAmelCase_ = self.activation(A_ )
return hidden_state
class TFRegNetYLayer(tf.keras.layers.Layer):
def __init__( self , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase = 1 , **_UpperCamelCase ) -> Optional[int]:
super().__init__(**A_ )
lowerCAmelCase_ = in_channels != out_channels or stride != 1
lowerCAmelCase_ = max(1 , out_channels // config.groups_width )
lowerCAmelCase_ = (
TFRegNetShortCut(A_ , stride=A_ , name="shortcut" )
if should_apply_shortcut
else tf.keras.layers.Activation("linear" , name="shortcut" )
)
lowerCAmelCase_ = [
TFRegNetConvLayer(A_ , kernel_size=1 , activation=config.hidden_act , name="layer.0" ),
TFRegNetConvLayer(
A_ , stride=A_ , groups=A_ , activation=config.hidden_act , name="layer.1" ),
TFRegNetSELayer(A_ , reduced_channels=int(round(in_channels / 4 ) ) , name="layer.2" ),
TFRegNetConvLayer(A_ , kernel_size=1 , activation=A_ , name="layer.3" ),
]
lowerCAmelCase_ = ACTaFN[config.hidden_act]
    def call(self, hidden_state):
lowerCAmelCase_ = hidden_state
for layer_module in self.layers:
lowerCAmelCase_ = layer_module(A_ )
lowerCAmelCase_ = self.shortcut(A_ )
hidden_state += residual
lowerCAmelCase_ = self.activation(A_ )
return hidden_state
class TFRegNetStage(tf.keras.layers.Layer):
def __init__( self , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase = 2 , _UpperCamelCase = 2 , **_UpperCamelCase ) -> Any:
super().__init__(**A_ )
lowerCAmelCase_ = TFRegNetXLayer if config.layer_type == "x" else TFRegNetYLayer
lowerCAmelCase_ = [
# downsampling is done in the first layer with stride of 2
layer(A_ , A_ , A_ , stride=A_ , name="layers.0" ),
*[layer(A_ , A_ , A_ , name=f"""layers.{i+1}""" ) for i in range(depth - 1 )],
]
    def call(self, hidden_state):
for layer_module in self.layers:
lowerCAmelCase_ = layer_module(A_ )
return hidden_state
class TFRegNetEncoder(tf.keras.layers.Layer):
def __init__( self , _UpperCamelCase , **_UpperCamelCase ) -> Dict:
super().__init__(**A_ )
lowerCAmelCase_ = []
# based on `downsample_in_first_stage`, the first layer of the first stage may or may not downsample the input
self.stages.append(
TFRegNetStage(
A_ , config.embedding_size , config.hidden_sizes[0] , stride=2 if config.downsample_in_first_stage else 1 , depth=config.depths[0] , name="stages.0" , ) )
lowerCAmelCase_ = zip(config.hidden_sizes , config.hidden_sizes[1:] )
for i, ((in_channels, out_channels), depth) in enumerate(zip(A_ , config.depths[1:] ) ):
self.stages.append(TFRegNetStage(A_ , A_ , A_ , depth=A_ , name=f"""stages.{i+1}""" ) )
    def call(self, hidden_state, output_hidden_states=False, return_dict=True) -> TFBaseModelOutputWithNoAttention:
lowerCAmelCase_ = () if output_hidden_states else None
for stage_module in self.stages:
if output_hidden_states:
lowerCAmelCase_ = hidden_states + (hidden_state,)
lowerCAmelCase_ = stage_module(A_ )
if output_hidden_states:
lowerCAmelCase_ = hidden_states + (hidden_state,)
if not return_dict:
return tuple(v for v in [hidden_state, hidden_states] if v is not None )
return TFBaseModelOutputWithNoAttention(last_hidden_state=A_ , hidden_states=A_ )
@keras_serializable
class TFRegNetMainLayer(tf.keras.layers.Layer):
    config_class = RegNetConfig
def __init__( self , _UpperCamelCase , **_UpperCamelCase ) -> Tuple:
super().__init__(**A_ )
lowerCAmelCase_ = config
lowerCAmelCase_ = TFRegNetEmbeddings(A_ , name="embedder" )
lowerCAmelCase_ = TFRegNetEncoder(A_ , name="encoder" )
lowerCAmelCase_ = tf.keras.layers.GlobalAveragePoolingaD(keepdims=A_ , name="pooler" )
@unpack_inputs
    def call(self, pixel_values, output_hidden_states=None, return_dict=None, training=False) -> TFBaseModelOutputWithPoolingAndNoAttention:
lowerCAmelCase_ = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
lowerCAmelCase_ = return_dict if return_dict is not None else self.config.use_return_dict
lowerCAmelCase_ = self.embedder(A_ , training=A_ )
lowerCAmelCase_ = self.encoder(
A_ , output_hidden_states=A_ , return_dict=A_ , training=A_ )
lowerCAmelCase_ = encoder_outputs[0]
lowerCAmelCase_ = self.pooler(A_ )
# Change to NCHW output format have uniformity in the modules
lowerCAmelCase_ = tf.transpose(A_ , perm=(0, 3, 1, 2) )
lowerCAmelCase_ = tf.transpose(A_ , perm=(0, 3, 1, 2) )
# Change the other hidden state outputs to NCHW as well
if output_hidden_states:
lowerCAmelCase_ = tuple([tf.transpose(A_ , perm=(0, 3, 1, 2) ) for h in encoder_outputs[1]] )
if not return_dict:
return (last_hidden_state, pooled_output) + encoder_outputs[1:]
return TFBaseModelOutputWithPoolingAndNoAttention(
last_hidden_state=A_ , pooler_output=A_ , hidden_states=hidden_states if output_hidden_states else encoder_outputs.hidden_states , )
class TFRegNetPreTrainedModel(TFPreTrainedModel):
    config_class = RegNetConfig
    base_model_prefix = "regnet"
    main_input_name = "pixel_values"
@property
    def input_signature(self):
return {"pixel_values": tf.TensorSpec(shape=(None, self.config.num_channels, 224, 224) , dtype=tf.floataa )}
_A = R"\n Parameters:\n This model is a Tensorflow\n [tf.keras.layers.Layer](https://www.tensorflow.org/api_docs/python/tf/keras/layers/Layer) sub-class. Use it as a\n regular Tensorflow Module and refer to the Tensorflow documentation for all matter related to general usage and\n behavior.\n config ([`RegNetConfig`]): Model configuration class with all the parameters of the model.\n Initializing with a config file does not load the weights associated with the model, only the\n configuration. Check out the [`~TFPreTrainedModel.from_pretrained`] method to load the model weights.\n"
_A = R"\n Args:\n pixel_values (`tf.Tensor` of shape `(batch_size, num_channels, height, width)`):\n Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See\n [`ConveNextImageProcessor.__call__`] for details.\n output_hidden_states (`bool`, *optional*):\n Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for\n more detail.\n return_dict (`bool`, *optional*):\n Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.\n"
@add_start_docstrings(
'''The bare RegNet model outputting raw features without any specific head on top.''' , REGNET_START_DOCSTRING , )
class TFRegNetModel(TFRegNetPreTrainedModel):
def __init__( self , _UpperCamelCase , *_UpperCamelCase , **_UpperCamelCase ) -> Any:
super().__init__(A_ , *A_ , **A_ )
lowerCAmelCase_ = TFRegNetMainLayer(A_ , name="regnet" )
@unpack_inputs
    @add_start_docstrings_to_model_forward(REGNET_INPUTS_DOCSTRING)
@add_code_sample_docstrings(
        checkpoint=_CHECKPOINT_FOR_DOC , output_type=TFBaseModelOutputWithPoolingAndNoAttention , config_class=_CONFIG_FOR_DOC , modality="vision" , expected_output=_EXPECTED_OUTPUT_SHAPE , )
    def call(self, pixel_values, output_hidden_states=None, return_dict=None, training=False) -> Union[TFBaseModelOutputWithPoolingAndNoAttention, Tuple[tf.Tensor]]:
lowerCAmelCase_ = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
lowerCAmelCase_ = return_dict if return_dict is not None else self.config.use_return_dict
lowerCAmelCase_ = self.regnet(
pixel_values=A_ , output_hidden_states=A_ , return_dict=A_ , training=A_ , )
if not return_dict:
return (outputs[0],) + outputs[1:]
return TFBaseModelOutputWithPoolingAndNoAttention(
last_hidden_state=outputs.last_hidden_state , pooler_output=outputs.pooler_output , hidden_states=outputs.hidden_states , )
@add_start_docstrings(
'''\n    RegNet Model with an image classification head on top (a linear layer on top of the pooled features), e.g. for\n    ImageNet.\n    ''' , REGNET_START_DOCSTRING , )
class TFRegNetForImageClassification(TFRegNetPreTrainedModel, TFSequenceClassificationLoss):
def __init__( self , _UpperCamelCase , *_UpperCamelCase , **_UpperCamelCase ) -> Optional[Any]:
super().__init__(A_ , *A_ , **A_ )
lowerCAmelCase_ = config.num_labels
lowerCAmelCase_ = TFRegNetMainLayer(A_ , name="regnet" )
# classification head
lowerCAmelCase_ = [
tf.keras.layers.Flatten(),
tf.keras.layers.Dense(config.num_labels , name="classifier.1" ) if config.num_labels > 0 else tf.identity,
]
@unpack_inputs
    @add_start_docstrings_to_model_forward(REGNET_INPUTS_DOCSTRING)
@add_code_sample_docstrings(
        checkpoint=_IMAGE_CLASS_CHECKPOINT , output_type=TFSequenceClassifierOutput , config_class=_CONFIG_FOR_DOC , expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT , )
    def call(self, pixel_values=None, labels=None, output_hidden_states=None, return_dict=None, training=False) -> Union[TFSequenceClassifierOutput, Tuple[tf.Tensor]]:
lowerCAmelCase_ = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
lowerCAmelCase_ = return_dict if return_dict is not None else self.config.use_return_dict
lowerCAmelCase_ = self.regnet(
A_ , output_hidden_states=A_ , return_dict=A_ , training=A_ )
lowerCAmelCase_ = outputs.pooler_output if return_dict else outputs[1]
lowerCAmelCase_ = self.classifier[0](A_ )
lowerCAmelCase_ = self.classifier[1](A_ )
lowerCAmelCase_ = None if labels is None else self.hf_compute_loss(labels=A_ , logits=A_ )
if not return_dict:
lowerCAmelCase_ = (logits,) + outputs[2:]
return ((loss,) + output) if loss is not None else output
return TFSequenceClassifierOutput(loss=A_ , logits=A_ , hidden_states=outputs.hidden_states )
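# A minimal usage sketch for the model defined above, assuming it is exposed
# through `transformers` as TFRegNetModel alongside AutoImageProcessor; the
# checkpoint name mirrors _CHECKPOINT_FOR_DOC and random data stands in for a
# real image just to show the shapes.
import tensorflow as tf
from transformers import AutoImageProcessor, TFRegNetModel

processor = AutoImageProcessor.from_pretrained("facebook/regnet-y-040")
model = TFRegNetModel.from_pretrained("facebook/regnet-y-040")

# RegNet consumes NCHW pixel values; the main layer transposes to NHWC internally.
pixel_values = tf.random.uniform((1, 3, 224, 224))
outputs = model(pixel_values)
print(outputs.last_hidden_state.shape)  # (1, 1088, 7, 7), cf. _EXPECTED_OUTPUT_SHAPE
print(outputs.pooler_output.shape)      # (1, 1088, 1, 1)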
| 703 |
import json
import multiprocessing as mp
import re
from collections import defaultdict
from functools import partial
from typing import Dict, List, Optional, Set, Tuple, Type
from datasets import Dataset
from datasketch import MinHash, MinHashLSH
from dpu_utils.utils.iterators import ThreadedIterator
from tqdm import tqdm
NON_ALPHA = re.compile("[^A-Za-z_0-9]")
# parameters used in DuplicationIndex
MIN_NUM_TOKENS = 10
NUM_PERM = 256
def get_min_hash(tokens: List[str]) -> Optional[MinHash]:
    """Compute the MinHash of a code snippet's tokens; skip very short snippets."""
    if len(tokens) < MIN_NUM_TOKENS:
        return None
    min_hash = MinHash(num_perm=NUM_PERM)
    for token in set(tokens):
        min_hash.update(token.encode())
    return min_hash
def get_tokens(code: str) -> Set[str]:
    """Tokenize a code snippet on non-alphanumeric characters."""
    return {t for t in NON_ALPHA.split(code) if len(t.strip()) > 0}
class DuplicationIndex:
    def __init__(self, *, duplication_jaccard_threshold: float = 0.85):
        self._duplication_jaccard_threshold = duplication_jaccard_threshold
        self._num_perm = NUM_PERM
        self._index = MinHashLSH(threshold=self._duplication_jaccard_threshold, num_perm=self._num_perm)
        self._duplicate_clusters = defaultdict(set)
    def add(self, code_key: Tuple, min_hash: MinHash) -> None:
        """Insert a key into the LSH index and cluster it with any near duplicates."""
        close_duplicates = self._index.query(min_hash)
        if code_key in self._index.keys:
            print(f"""Duplicate key {code_key}""")
            return
        self._index.insert(code_key, min_hash)
        if len(close_duplicates) > 0:
            for base_duplicate in close_duplicates:
                if base_duplicate in self._duplicate_clusters:
                    self._duplicate_clusters[base_duplicate].add(code_key)
                    break
            else:
                self._duplicate_clusters[close_duplicates[0]].add(code_key)
    def get_duplicate_clusters(self) -> List[List[Dict]]:
        duplicate_clusters = []
        for base, duplicates in self._duplicate_clusters.items():
            cluster = [base] + list(duplicates)
            # reformat the cluster to be a list of dict
            cluster = [{"base_index": el[0], "repo_name": el[1], "path": el[2]} for el in cluster]
            duplicate_clusters.append(cluster)
        return duplicate_clusters
    def save(self, filepath) -> None:
        duplicate_clusters = self.get_duplicate_clusters()
        with open(filepath, "w") as f:
            json.dump(duplicate_clusters, f)
def _compute_min_hash(element):
    index, data = element
    min_hash = get_min_hash([t for t in NON_ALPHA.split(data["content"]) if len(t.strip()) > 0])
    if min_hash is not None:
        return (index, data["repo_name"], data["path"]), min_hash
def minhash_iter(dataset_iterator: Type[Dataset]):
    with mp.Pool() as pool:
        for data in pool.imap_unordered(
            _compute_min_hash,
            ThreadedIterator(dataset_iterator, max_queue_size=10000),
            chunksize=100,
        ):
            if data is not None:
                yield data
def make_duplicate_clusters(dataset_iterator: Type[Dataset], jaccard_threshold: float):
    """Find duplicate clusters in the dataset, keyed by (index, repo_name, path)."""
    di = DuplicationIndex(duplication_jaccard_threshold=jaccard_threshold)
    for filename, min_hash in tqdm(ThreadedIterator(minhash_iter(enumerate(dataset_iterator)), max_queue_size=100)):
        di.add(filename, min_hash)
    # Returns a List[Cluster] where Cluster is List[str] with the filenames.
    return di.get_duplicate_clusters()
def jaccard_similarity(code1: str, code2: str) -> float:
    """Compute the Jaccard similarity of two code snippets' token sets."""
    tokens1 = get_tokens(code1)
    tokens2 = get_tokens(code2)
    return len(tokens1 & tokens2) / len(tokens1 | tokens2)
_shared_dataset = None
def _find_cluster_extremes_shared(cluster, jaccard_threshold):
    """Keep one representative ("extreme") per group of near-identical files in a cluster."""
    extremes = []
    for element1 in cluster:
        code1 = _shared_dataset[element1["base_index"]]["content"]
        for element2 in extremes:
            code2 = _shared_dataset[element2["base_index"]]["content"]
            if jaccard_similarity(code1, code2) >= jaccard_threshold:
                element2["copies"] += 1
                break
        else:
            element1["copies"] = 1
            extremes.append(element1)
    return extremes
def find_extremes(cluster_list, dataset, jaccard_threshold):
    """Run _find_cluster_extremes_shared in parallel, sharing the dataset via a module global."""
    global _shared_dataset
    _shared_dataset = dataset
    extremes_list = []
    f = partial(_find_cluster_extremes_shared, jaccard_threshold=jaccard_threshold)
    with mp.Pool() as pool:
        for extremes in tqdm(
            pool.imap_unordered(f, cluster_list),
            total=len(cluster_list),
        ):
            extremes_list.append(extremes)
    return extremes_list
def deduplicate_dataset(dataset: Type[Dataset], jaccard_threshold: float = 0.85):
    """Drop near-duplicate files, keeping one extreme per duplicate cluster."""
    duplicate_clusters = make_duplicate_clusters(dataset, jaccard_threshold)
    duplicate_indices = {x["base_index"] for cluster in duplicate_clusters for x in cluster}
    extreme_dict = {}
    extremes_clusters = find_extremes(duplicate_clusters, dataset, jaccard_threshold)
    for extremes in extremes_clusters:
        for element in extremes:
            extreme_dict[element["base_index"]] = element
    remove_indices = duplicate_indices - set(extreme_dict.keys())
    ds_filter = dataset.filter(lambda x, idx: idx not in remove_indices, with_indices=True)
    # update duplicate_clusters
    for cluster in duplicate_clusters:
        for element in cluster:
            element["is_extreme"] = element["base_index"] in extreme_dict
            if element["is_extreme"]:
                element["copies"] = extreme_dict[element["base_index"]]["copies"]
    print(f"""Original dataset size: {len(dataset)}""")
    print(f"""Number of duplicate clusters: {len(duplicate_clusters)}""")
    print(f"""Files in duplicate cluster: {len(duplicate_indices)}""")
    print(f"""Unique files in duplicate cluster: {len(extreme_dict)}""")
    print(f"""Filtered dataset size: {len(ds_filter)}""")
    return ds_filter, duplicate_clusters
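# A self-contained sketch of the MinHash estimate the index above is built on,
# assuming `datasketch` is installed; the strings and num_perm are illustrative.
from datasketch import MinHash


def minhash_of(text: str) -> MinHash:
    m = MinHash(num_perm=256)
    for token in set(text.split()):
        m.update(token.encode())
    return m


a = minhash_of("def add(a, b): return a + b")
b = minhash_of("def add(x, y): return x + y")
print(a.jaccard(b))  # estimated Jaccard similarity of the two token sets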
| 279 | 0 |
'''simple docstring'''
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import flax
import jax
import jax.numpy as jnp
from ..configuration_utils import ConfigMixin, register_to_config
from .scheduling_utils_flax import (
CommonSchedulerState,
FlaxKarrasDiffusionSchedulers,
FlaxSchedulerMixin,
FlaxSchedulerOutput,
add_noise_common,
get_velocity_common,
)
@flax.struct.dataclass
class DDPMSchedulerState:
    common: CommonSchedulerState
    # setable values
    init_noise_sigma: jnp.ndarray
    timesteps: jnp.ndarray
    num_inference_steps: Optional[int] = None
    @classmethod
    def create(cls, common: CommonSchedulerState, init_noise_sigma: jnp.ndarray, timesteps: jnp.ndarray):
        return cls(common=common, init_noise_sigma=init_noise_sigma, timesteps=timesteps)
@dataclass
class FlaxDDPMSchedulerOutput(FlaxSchedulerOutput):
    state: DDPMSchedulerState
class FlaxDDPMScheduler(FlaxSchedulerMixin, ConfigMixin):
    _compatibles = [e.name for e in FlaxKarrasDiffusionSchedulers]
    dtype: jnp.dtype
    @property
    def has_state(self):
        return True
@register_to_config
    def __init__(self, num_train_timesteps: int = 1000, beta_start: float = 0.0001, beta_end: float = 0.02, beta_schedule: str = "linear", trained_betas: Optional[jnp.ndarray] = None, variance_type: str = "fixed_small", clip_sample: bool = True, prediction_type: str = "epsilon", dtype: jnp.dtype = jnp.float32, ):
        self.dtype = dtype
    def create_state(self, common: Optional[CommonSchedulerState] = None) -> DDPMSchedulerState:
        if common is None:
            common = CommonSchedulerState.create(self)
        # standard deviation of the initial noise distribution
        init_noise_sigma = jnp.array(1.0, dtype=self.dtype)
        timesteps = jnp.arange(0, self.config.num_train_timesteps).round()[::-1]
        return DDPMSchedulerState.create(
            common=common, init_noise_sigma=init_noise_sigma, timesteps=timesteps, )
    def scale_model_input(self, state: DDPMSchedulerState, sample: jnp.ndarray, timestep: Optional[int] = None) -> jnp.ndarray:
        return sample
    def set_timesteps(self, state: DDPMSchedulerState, num_inference_steps: int, shape: Tuple = ()) -> DDPMSchedulerState:
        step_ratio = self.config.num_train_timesteps // num_inference_steps
        # creates integer timesteps by multiplying by ratio
        # rounding to avoid issues when num_inference_step is power of 3
        timesteps = (jnp.arange(0, num_inference_steps) * step_ratio).round()[::-1]
        return state.replace(
            num_inference_steps=num_inference_steps, timesteps=timesteps, )
    def _get_variance(self, state: DDPMSchedulerState, t, predicted_variance=None, variance_type=None):
        alpha_prod_t = state.common.alphas_cumprod[t]
        alpha_prod_t_prev = jnp.where(t > 0, state.common.alphas_cumprod[t - 1], jnp.array(1.0, dtype=self.dtype))
        # For t > 0, compute predicted variance βt (see formula (6) and (7) from https://arxiv.org/pdf/2006.11239.pdf)
        # and sample from it to get previous sample
        # x_{t-1} ~ N(pred_prev_sample, variance) == add variance to pred_sample
        variance = (1 - alpha_prod_t_prev) / (1 - alpha_prod_t) * state.common.betas[t]
        if variance_type is None:
            variance_type = self.config.variance_type
        # hacks - were probably added for training stability
        if variance_type == "fixed_small":
            variance = jnp.clip(variance, a_min=1e-20)
        # for rl-diffuser https://arxiv.org/abs/2205.09991
        elif variance_type == "fixed_small_log":
            variance = jnp.log(jnp.clip(variance, a_min=1e-20))
        elif variance_type == "fixed_large":
            variance = state.common.betas[t]
        elif variance_type == "fixed_large_log":
            # Glide max_log
            variance = jnp.log(state.common.betas[t])
        elif variance_type == "learned":
            return predicted_variance
        elif variance_type == "learned_range":
            min_log = variance
            max_log = state.common.betas[t]
            frac = (predicted_variance + 1) / 2
            variance = frac * max_log + (1 - frac) * min_log
        return variance
    def step(self, state: DDPMSchedulerState, model_output: jnp.ndarray, timestep: int, sample: jnp.ndarray, key=None, return_dict: bool = True, ) -> Union[FlaxDDPMSchedulerOutput, Tuple]:
        t = timestep
        if key is None:
            key = jax.random.PRNGKey(0)
        if model_output.shape[1] == sample.shape[1] * 2 and self.config.variance_type in ["learned", "learned_range"]:
            model_output, predicted_variance = jnp.split(model_output, sample.shape[1], axis=1)
        else:
            predicted_variance = None
        # 1. compute alphas, betas
        alpha_prod_t = state.common.alphas_cumprod[t]
        alpha_prod_t_prev = jnp.where(t > 0, state.common.alphas_cumprod[t - 1], jnp.array(1.0, dtype=self.dtype))
        beta_prod_t = 1 - alpha_prod_t
        beta_prod_t_prev = 1 - alpha_prod_t_prev
        # 2. compute predicted original sample from predicted noise also called
        # "predicted x_0" of formula (15) from https://arxiv.org/pdf/2006.11239.pdf
        if self.config.prediction_type == "epsilon":
            pred_original_sample = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5
        elif self.config.prediction_type == "sample":
            pred_original_sample = model_output
        elif self.config.prediction_type == "v_prediction":
            pred_original_sample = (alpha_prod_t**0.5) * sample - (beta_prod_t**0.5) * model_output
        else:
            raise ValueError(
                f"""prediction_type given as {self.config.prediction_type} must be one of `epsilon`, `sample` """
                'or `v_prediction` for the FlaxDDPMScheduler.' )
        # 3. Clip "predicted x_0"
        if self.config.clip_sample:
            pred_original_sample = jnp.clip(pred_original_sample, -1, 1)
        # 4. Compute coefficients for pred_original_sample x_0 and current sample x_t
        # See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
        pred_original_sample_coeff = (alpha_prod_t_prev ** 0.5 * state.common.betas[t]) / beta_prod_t
        current_sample_coeff = state.common.alphas[t] ** 0.5 * beta_prod_t_prev / beta_prod_t
        # 5. Compute predicted previous sample µ_t
        # See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
        pred_prev_sample = pred_original_sample_coeff * pred_original_sample + current_sample_coeff * sample
        # 6. Add noise
        def random_variance():
            split_key = jax.random.split(key, num=1)
            noise = jax.random.normal(split_key, shape=model_output.shape, dtype=self.dtype)
            return (self._get_variance(state, t, predicted_variance=predicted_variance) ** 0.5) * noise

        variance = jnp.where(t > 0, random_variance(), jnp.zeros(model_output.shape, dtype=self.dtype))
        pred_prev_sample = pred_prev_sample + variance
        if not return_dict:
            return (pred_prev_sample, state)
        return FlaxDDPMSchedulerOutput(prev_sample=pred_prev_sample, state=state)
    def add_noise(self, state: DDPMSchedulerState, original_samples: jnp.ndarray, noise: jnp.ndarray, timesteps: jnp.ndarray, ) -> jnp.ndarray:
        return add_noise_common(state.common, original_samples, noise, timesteps)
    def get_velocity(self, state: DDPMSchedulerState, sample: jnp.ndarray, noise: jnp.ndarray, timesteps: jnp.ndarray, ) -> jnp.ndarray:
        return get_velocity_common(state.common, sample, noise, timesteps)
    def __len__(self):
        return self.config.num_train_timesteps
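# A minimal sketch of driving the scheduler's functional state, assuming the
# class above is available as `diffusers.FlaxDDPMScheduler`; the zero tensors
# stand in for a real UNet prediction and latents.
import jax
import jax.numpy as jnp
from diffusers import FlaxDDPMScheduler

scheduler = FlaxDDPMScheduler(num_train_timesteps=1000)
state = scheduler.create_state()
state = scheduler.set_timesteps(state, num_inference_steps=50)

sample = jnp.zeros((1, 3, 32, 32))
key = jax.random.PRNGKey(0)
for t in state.timesteps:
    model_output = jnp.zeros_like(sample)  # stand-in for a noise prediction
    sample, state = scheduler.step(state, model_output, t, sample, key=key, return_dict=False)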
| 517 |
"""simple docstring"""
class Node:
    def __init__(self, data, previous=None, next_node=None):
        self.data = data
        self.previous = previous
        self.next = next_node

    def __str__(self):
        return f"""{self.data}"""

    def get_data(self):
        return self.data

    def get_next(self):
        return self.next

    def get_previous(self):
        return self.previous
class LinkedListIterator:
    def __init__(self, head):
        self.current = head

    def __iter__(self):
        return self

    def __next__(self):
        if not self.current:
            raise StopIteration
        else:
            value = self.current.get_data()
            self.current = self.current.get_next()
            return value
class LinkedList:
    def __init__(self):
        self.head = None  # First node in list
        self.tail = None  # Last node in list

    def __str__(self):
        current = self.head
        nodes = []
        while current is not None:
            nodes.append(current.get_data())
            current = current.get_next()
        return " ".join(str(node) for node in nodes)

    def __contains__(self, value):
        current = self.head
        while current:
            if current.get_data() == value:
                return True
            current = current.get_next()
        return False

    def __iter__(self):
        return LinkedListIterator(self.head)

    def get_head_data(self):
        if self.head:
            return self.head.get_data()
        return None

    def get_tail_data(self):
        if self.tail:
            return self.tail.get_data()
        return None

    def set_head(self, node):
        if self.head is None:
            self.head = node
            self.tail = node
        else:
            self.insert_before_node(self.head, node)

    def set_tail(self, node):
        if self.head is None:
            self.set_head(node)
        else:
            self.insert_after_node(self.tail, node)

    def insert(self, value):
        node = Node(value)
        if self.head is None:
            self.set_head(node)
        else:
            self.set_tail(node)

    def insert_before_node(self, node, node_to_insert):
        node_to_insert.next = node
        node_to_insert.previous = node.previous
        if node.get_previous() is None:
            self.head = node_to_insert
        else:
            node.previous.next = node_to_insert
        node.previous = node_to_insert

    def insert_after_node(self, node, node_to_insert):
        node_to_insert.previous = node
        node_to_insert.next = node.next
        if node.get_next() is None:
            self.tail = node_to_insert
        else:
            node.next.previous = node_to_insert
        node.next = node_to_insert

    def insert_at_position(self, position, value):
        current_position = 1
        new_node = Node(value)
        node = self.head
        while node:
            if current_position == position:
                self.insert_before_node(node, new_node)
                return
            current_position += 1
            node = node.next
        self.insert_after_node(self.tail, new_node)

    def get_node(self, item):
        node = self.head
        while node:
            if node.get_data() == item:
                return node
            node = node.get_next()
        raise Exception("""Node not found""")

    def delete_value(self, value):
        if (node := self.get_node(value)) is not None:
            if node == self.head:
                self.head = self.head.get_next()
            if node == self.tail:
                self.tail = self.tail.get_previous()
            self.remove_node_pointers(node)

    @staticmethod
    def remove_node_pointers(node):
        if node.get_next():
            node.next.previous = node.previous
        if node.get_previous():
            node.previous.next = node.next
        node.next = None
        node.previous = None

    def is_empty(self):
        return self.head is None
def __UpperCamelCase ( ):
pass
if __name__ == "__main__":
import doctest
doctest.testmod()
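# A short usage sketch of the list above.
linked_list = LinkedList()
for value in (1, 2, 3):
    linked_list.insert(value)
print(linked_list)       # 1 2 3
print(2 in linked_list)  # True
linked_list.delete_value(2)
print(list(linked_list)) # [1, 3]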
| 180 | 0 |
"""simple docstring"""
import random
from typing import Any
def fisher_yates_shuffle(data: list) -> list[Any]:
    """Shuffle `data` in place with the Fisher-Yates algorithm and return it."""
    # Walk from the last index down, swapping each element with a uniformly
    # chosen element at or before it; this yields an unbiased permutation.
    for i in range(len(data) - 1, 0, -1):
        j = random.randint(0, i)
        data[i], data[j] = data[j], data[i]
    return data
if __name__ == "__main__":
integers = [0, 1, 2, 3, 4, 5, 6, 7]
strings = ['python', 'says', 'hello', '!']
print('Fisher-Yates Shuffle:')
print('List', integers, strings)
print('FY Shuffle', fisher_yates_shuffle(integers), fisher_yates_shuffle(strings))
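# A quick empirical sanity check (illustrative): under an unbiased shuffle,
# each of the six permutations of three elements should appear with
# probability close to 1/6.
from collections import Counter

counts = Counter(tuple(fisher_yates_shuffle([0, 1, 2])) for _ in range(60_000))
print({p: round(c / 60_000, 3) for p, c in sorted(counts.items())})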
| 194 |
"""simple docstring"""
def solution(n: int = 1000) -> int:
    # For each base a >= 3, the maximum of ((a - 1)**k + (a + 1)**k) % a**2
    # over k is 2 * a * ((a - 1) // 2); summing it for 3 <= a <= n appears to
    # solve Project Euler problem 120 ("Square remainders").
    return sum(2 * a * ((a - 1) // 2) for a in range(3, n + 1))
if __name__ == "__main__":
print(solution())
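# A brute-force cross-check of the closed form used above (illustrative): for
# small bases, search the residues ((a - 1)**k + (a + 1)**k) % a**2 directly.
def r_max_brute(a: int) -> int:
    return max(((a - 1) ** k + (a + 1) ** k) % (a * a) for k in range(1, 2 * a + 1))

assert all(r_max_brute(a) == 2 * a * ((a - 1) // 2) for a in range(3, 30))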
| 194 | 1 |
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
PNDMScheduler,
StableDiffusionLDMaDPipeline,
UNetaDConditionModel,
)
from diffusers.utils import nightly, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
enable_full_determinism()
class StableDiffusionLDMaDPipelineFastTests(unittest.TestCase):
    pipeline_class = StableDiffusionLDMaDPipeline
    params = TEXT_TO_IMAGE_PARAMS
    batch_params = TEXT_TO_IMAGE_BATCH_PARAMS
    image_params = TEXT_TO_IMAGE_IMAGE_PARAMS
    def get_dummy_components(self):
torch.manual_seed(0 )
_UpperCAmelCase : List[str] = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("""DownBlock2D""", """CrossAttnDownBlock2D""") , up_block_types=("""CrossAttnUpBlock2D""", """UpBlock2D""") , cross_attention_dim=32 , )
_UpperCAmelCase : List[Any] = DDIMScheduler(
beta_start=0.0_0_0_8_5 , beta_end=0.0_1_2 , beta_schedule="""scaled_linear""" , clip_sample=_SCREAMING_SNAKE_CASE , set_alpha_to_one=_SCREAMING_SNAKE_CASE , )
torch.manual_seed(0 )
_UpperCAmelCase : str = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=6 , out_channels=6 , down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D"""] , up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D"""] , latent_channels=4 , )
torch.manual_seed(0 )
_UpperCAmelCase : Any = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=10_00 , )
_UpperCAmelCase : Optional[int] = CLIPTextModel(_SCREAMING_SNAKE_CASE )
_UpperCAmelCase : Optional[int] = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )
_UpperCAmelCase : Tuple = {
"""unet""": unet,
"""scheduler""": scheduler,
"""vae""": vae,
"""text_encoder""": text_encoder,
"""tokenizer""": tokenizer,
"""safety_checker""": None,
"""feature_extractor""": None,
}
return components
    def get_dummy_inputs(self, device, seed=0):
if str(_SCREAMING_SNAKE_CASE ).startswith("""mps""" ):
_UpperCAmelCase : Optional[int] = torch.manual_seed(_SCREAMING_SNAKE_CASE )
else:
_UpperCAmelCase : Union[str, Any] = torch.Generator(device=_SCREAMING_SNAKE_CASE ).manual_seed(_SCREAMING_SNAKE_CASE )
_UpperCAmelCase : Dict = {
"""prompt""": """A painting of a squirrel eating a burger""",
"""generator""": generator,
"""num_inference_steps""": 2,
"""guidance_scale""": 6.0,
"""output_type""": """numpy""",
}
return inputs
    def test_stable_diffusion_ldmad_default_case(self):
_UpperCAmelCase : Optional[Any] = """cpu""" # ensure determinism for the device-dependent torch.Generator
_UpperCAmelCase : Any = self.get_dummy_components()
_UpperCAmelCase : Optional[int] = StableDiffusionLDMaDPipeline(**_SCREAMING_SNAKE_CASE )
_UpperCAmelCase : Any = ldmad_pipe.to(_SCREAMING_SNAKE_CASE )
ldmad_pipe.set_progress_bar_config(disable=_SCREAMING_SNAKE_CASE )
_UpperCAmelCase : Optional[int] = self.get_dummy_inputs(_SCREAMING_SNAKE_CASE )
_UpperCAmelCase : List[Any] = ldmad_pipe(**_SCREAMING_SNAKE_CASE )
_UpperCAmelCase,_UpperCAmelCase : Tuple = output.rgb, output.depth
_UpperCAmelCase : Dict = rgb[0, -3:, -3:, -1]
_UpperCAmelCase : Union[str, Any] = depth[0, -3:, -1]
assert rgb.shape == (1, 64, 64, 3)
assert depth.shape == (1, 64, 64)
_UpperCAmelCase : Any = np.array(
[0.3_7_3_3_8_1_7_6, 0.7_0_2_4_7, 0.7_4_2_0_3_1_9_3, 0.5_1_6_4_3_6_0_4, 0.5_8_2_5_6_7_9_3, 0.6_0_9_3_2_1_3_6, 0.4_1_8_1_0_9_5, 0.4_8_3_5_5_8_7_7, 0.4_6_5_3_5_2_6_2] )
_UpperCAmelCase : List[Any] = np.array([1_03.4_67_27, 85.81_20_04, 87.84_92_36] )
assert np.abs(image_slice_rgb.flatten() - expected_slice_rgb ).max() < 1e-2
assert np.abs(image_slice_depth.flatten() - expected_slice_depth ).max() < 1e-2
    def test_stable_diffusion_ldmad_prompt_embeds(self):
_UpperCAmelCase : Optional[Any] = self.get_dummy_components()
_UpperCAmelCase : Tuple = StableDiffusionLDMaDPipeline(**_SCREAMING_SNAKE_CASE )
_UpperCAmelCase : Tuple = ldmad_pipe.to(_SCREAMING_SNAKE_CASE )
ldmad_pipe.set_progress_bar_config(disable=_SCREAMING_SNAKE_CASE )
_UpperCAmelCase : Tuple = self.get_dummy_inputs(_SCREAMING_SNAKE_CASE )
_UpperCAmelCase : Optional[Any] = 3 * [inputs["""prompt"""]]
# forward
_UpperCAmelCase : str = ldmad_pipe(**_SCREAMING_SNAKE_CASE )
_UpperCAmelCase,_UpperCAmelCase : List[Any] = output.rgb, output.depth
_UpperCAmelCase : Any = rgb_slice_a[0, -3:, -3:, -1]
_UpperCAmelCase : Tuple = depth_slice_a[0, -3:, -1]
_UpperCAmelCase : Optional[Any] = self.get_dummy_inputs(_SCREAMING_SNAKE_CASE )
_UpperCAmelCase : Any = 3 * [inputs.pop("""prompt""" )]
_UpperCAmelCase : Optional[int] = ldmad_pipe.tokenizer(
_SCREAMING_SNAKE_CASE , padding="""max_length""" , max_length=ldmad_pipe.tokenizer.model_max_length , truncation=_SCREAMING_SNAKE_CASE , return_tensors="""pt""" , )
_UpperCAmelCase : List[Any] = text_inputs["""input_ids"""].to(_SCREAMING_SNAKE_CASE )
_UpperCAmelCase : Optional[int] = ldmad_pipe.text_encoder(_SCREAMING_SNAKE_CASE )[0]
_UpperCAmelCase : List[Any] = prompt_embeds
# forward
_UpperCAmelCase : Optional[int] = ldmad_pipe(**_SCREAMING_SNAKE_CASE )
_UpperCAmelCase,_UpperCAmelCase : List[str] = output.rgb, output.depth
_UpperCAmelCase : List[Any] = rgb_slice_a[0, -3:, -3:, -1]
_UpperCAmelCase : str = depth_slice_a[0, -3:, -1]
assert np.abs(rgb_slice_a.flatten() - rgb_slice_a.flatten() ).max() < 1e-4
assert np.abs(depth_slice_a.flatten() - depth_slice_a.flatten() ).max() < 1e-4
    def test_stable_diffusion_ldmad_negative_prompt(self):
_UpperCAmelCase : List[Any] = """cpu""" # ensure determinism for the device-dependent torch.Generator
_UpperCAmelCase : Any = self.get_dummy_components()
_UpperCAmelCase : List[Any] = PNDMScheduler(skip_prk_steps=_SCREAMING_SNAKE_CASE )
_UpperCAmelCase : Optional[int] = StableDiffusionLDMaDPipeline(**_SCREAMING_SNAKE_CASE )
_UpperCAmelCase : Optional[int] = ldmad_pipe.to(_SCREAMING_SNAKE_CASE )
ldmad_pipe.set_progress_bar_config(disable=_SCREAMING_SNAKE_CASE )
_UpperCAmelCase : str = self.get_dummy_inputs(_SCREAMING_SNAKE_CASE )
_UpperCAmelCase : Union[str, Any] = """french fries"""
_UpperCAmelCase : Dict = ldmad_pipe(**_SCREAMING_SNAKE_CASE , negative_prompt=_SCREAMING_SNAKE_CASE )
_UpperCAmelCase,_UpperCAmelCase : List[str] = output.rgb, output.depth
_UpperCAmelCase : Any = rgb[0, -3:, -3:, -1]
_UpperCAmelCase : Dict = depth[0, -3:, -1]
assert rgb.shape == (1, 64, 64, 3)
assert depth.shape == (1, 64, 64)
_UpperCAmelCase : Any = np.array(
[0.3_7_0_4_4, 0.7_1_8_1_1_5_0_3, 0.7_2_2_3_2_5_1, 0.4_8_6_0_3_6_7_5, 0.5_6_3_8_3_9_1, 0.6_3_6_4_9_4_8, 0.4_2_8_3_3_7_0_4, 0.4_9_0_1_3_1_5, 0.4_7_9_2_6_2_1_7] )
_UpperCAmelCase : Optional[Any] = np.array([1_07.8_47_38, 84.6_28_02, 89.96_21_35] )
assert np.abs(rgb_slice.flatten() - expected_slice_rgb ).max() < 1e-2
assert np.abs(depth_slice.flatten() - expected_slice_depth ).max() < 1e-2
@slow
@require_torch_gpu
class StableDiffusionLDMaDPipelineSlowTests(unittest.TestCase):
    def tearDown(self):
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __snake_case( self , A_ , A_="cpu" , A_=torch.floataa , A_=0 ):
_UpperCAmelCase : List[Any] = torch.Generator(device=_SCREAMING_SNAKE_CASE ).manual_seed(_SCREAMING_SNAKE_CASE )
_UpperCAmelCase : Optional[Any] = np.random.RandomState(_SCREAMING_SNAKE_CASE ).standard_normal((1, 4, 64, 64) )
_UpperCAmelCase : Tuple = torch.from_numpy(_SCREAMING_SNAKE_CASE ).to(device=_SCREAMING_SNAKE_CASE , dtype=_SCREAMING_SNAKE_CASE )
_UpperCAmelCase : int = {
"""prompt""": """a photograph of an astronaut riding a horse""",
"""latents""": latents,
"""generator""": generator,
"""num_inference_steps""": 3,
"""guidance_scale""": 7.5,
"""output_type""": """numpy""",
}
return inputs
    def test_ldmad_stable_diffusion(self):
_UpperCAmelCase : Optional[Any] = StableDiffusionLDMaDPipeline.from_pretrained("""Intel/ldm3d""" )
_UpperCAmelCase : Optional[Any] = ldmad_pipe.to(_SCREAMING_SNAKE_CASE )
ldmad_pipe.set_progress_bar_config(disable=_SCREAMING_SNAKE_CASE )
_UpperCAmelCase : Optional[int] = self.get_inputs(_SCREAMING_SNAKE_CASE )
_UpperCAmelCase : Dict = ldmad_pipe(**_SCREAMING_SNAKE_CASE )
_UpperCAmelCase,_UpperCAmelCase : List[Any] = output.rgb, output.depth
_UpperCAmelCase : Optional[int] = rgb[0, -3:, -3:, -1].flatten()
_UpperCAmelCase : str = rgb[0, -3:, -1].flatten()
assert rgb.shape == (1, 5_12, 5_12, 3)
assert depth.shape == (1, 5_12, 5_12)
_UpperCAmelCase : Union[str, Any] = np.array(
[0.5_3_8_0_5_4_6_5, 0.5_6_7_0_7_3_0_5, 0.5_4_8_6_5_1_5, 0.5_7_0_1_2_2_3_6, 0.5_8_1_4_5_1_1, 0.5_6_2_5_3_4_8_7, 0.5_4_8_4_3_0_1_4, 0.5_5_0_9_2_2_6_3, 0.6_4_5_9_7_0_6] )
_UpperCAmelCase : str = np.array(
[0.9_2_6_3_7_8_1, 0.6_6_7_8_6_7_2, 0.5_4_8_6_5_1_5, 0.9_2_2_0_2_1_4_5, 0.6_7_8_3_1_1_3_5, 0.5_6_2_5_3_4_8_7, 0.9_2_4_1_6_9_4, 0.7_5_5_1_4_7_8, 0.6_4_5_9_7_0_6] )
assert np.abs(rgb_slice - expected_slice_rgb ).max() < 3e-3
assert np.abs(depth_slice - expected_slice_depth ).max() < 3e-3
@nightly
@require_torch_gpu
class StableDiffusionLDMaDPipelineNightlyTests(unittest.TestCase):
    def tearDown(self):
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __snake_case( self , A_ , A_="cpu" , A_=torch.floataa , A_=0 ):
_UpperCAmelCase : Union[str, Any] = torch.Generator(device=_SCREAMING_SNAKE_CASE ).manual_seed(_SCREAMING_SNAKE_CASE )
_UpperCAmelCase : List[Any] = np.random.RandomState(_SCREAMING_SNAKE_CASE ).standard_normal((1, 4, 64, 64) )
_UpperCAmelCase : str = torch.from_numpy(_SCREAMING_SNAKE_CASE ).to(device=_SCREAMING_SNAKE_CASE , dtype=_SCREAMING_SNAKE_CASE )
_UpperCAmelCase : List[str] = {
"""prompt""": """a photograph of an astronaut riding a horse""",
"""latents""": latents,
"""generator""": generator,
"""num_inference_steps""": 50,
"""guidance_scale""": 7.5,
"""output_type""": """numpy""",
}
return inputs
    def test_ldmad(self):
_UpperCAmelCase : int = StableDiffusionLDMaDPipeline.from_pretrained("""Intel/ldm3d""" ).to(_SCREAMING_SNAKE_CASE )
ldmad_pipe.set_progress_bar_config(disable=_SCREAMING_SNAKE_CASE )
_UpperCAmelCase : Tuple = self.get_inputs(_SCREAMING_SNAKE_CASE )
_UpperCAmelCase : str = ldmad_pipe(**_SCREAMING_SNAKE_CASE )
_UpperCAmelCase,_UpperCAmelCase : Optional[int] = output.rgb, output.depth
_UpperCAmelCase : List[Any] = 0.4_9_5_5_8_6
_UpperCAmelCase : List[str] = 0.3_3_7_9_5_5_1_5
_UpperCAmelCase : List[Any] = 1_12.4_85_18
_UpperCAmelCase : List[Any] = 98.48_97_46
assert np.abs(expected_rgb_mean - rgb.mean() ) < 1e-3
assert np.abs(expected_rgb_std - rgb.std() ) < 1e-3
assert np.abs(expected_depth_mean - depth.mean() ) < 1e-3
assert np.abs(expected_depth_std - depth.std() ) < 1e-3
    def test_ldmad_4c(self):
_UpperCAmelCase : Optional[int] = StableDiffusionLDMaDPipeline.from_pretrained("""Intel/ldm3d-4c""" ).to(_SCREAMING_SNAKE_CASE )
ldmad_pipe.set_progress_bar_config(disable=_SCREAMING_SNAKE_CASE )
_UpperCAmelCase : Optional[int] = self.get_inputs(_SCREAMING_SNAKE_CASE )
_UpperCAmelCase : Optional[Any] = ldmad_pipe(**_SCREAMING_SNAKE_CASE )
_UpperCAmelCase,_UpperCAmelCase : Dict = output.rgb, output.depth
_UpperCAmelCase : int = 0.4_1_9_4_1_2_7
_UpperCAmelCase : Any = 0.3_5_3_7_5_5_8_6
_UpperCAmelCase : List[str] = 0.5_6_3_8_5_0_2
_UpperCAmelCase : Optional[Any] = 0.3_4_6_8_6_1_0_3
assert rgb.shape == (1, 5_12, 5_12, 3)
assert depth.shape == (1, 5_12, 5_12, 1)
assert np.abs(expected_rgb_mean - rgb.mean() ) < 1e-3
assert np.abs(expected_rgb_std - rgb.std() ) < 1e-3
assert np.abs(expected_depth_mean - depth.mean() ) < 1e-3
assert np.abs(expected_depth_std - depth.std() ) < 1e-3
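# A minimal inference sketch for the pipeline under test, assuming the
# "Intel/ldm3d" checkpoint and a CUDA device are available; with the default
# PIL output type, `rgb` and `depth` are lists of images.
import torch
from diffusers import StableDiffusionLDMaDPipeline

pipe = StableDiffusionLDMaDPipeline.from_pretrained("Intel/ldm3d").to("cuda")
output = pipe("a photograph of an astronaut riding a horse", num_inference_steps=50)
output.rgb[0].save("ldm3d_rgb.png")
output.depth[0].save("ldm3d_depth.png")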
| 643 |
def different_signs(num1: int, num2: int) -> bool:
    """
    Return True if the two integers have opposite signs (their sign bits differ).

    >>> different_signs(1, -1)
    True
    >>> different_signs(1, 1)
    False
    >>> different_signs(-1, -1)
    False
    """
    return num1 ^ num2 < 0
if __name__ == "__main__":
import doctest
doctest.testmod()
| 280 | 0 |
import argparse
import json
import re
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
MobileNetVaConfig,
MobileNetVaForImageClassification,
MobileNetVaImageProcessor,
load_tf_weights_in_mobilenet_va,
)
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def get_mobilenet_va_config(model_name: str) -> MobileNetVaConfig:
    config = MobileNetVaConfig(layer_norm_eps=0.001)
    if "_quant" in model_name:
        raise ValueError("Quantized models are not supported.")
    matches = re.match(r"^mobilenet_v1_([^_]*)_([^_]*)$", model_name)
    if matches:
        config.depth_multiplier = float(matches[1])
        config.image_size = int(matches[2])
    # The TensorFlow version of MobileNetV1 predicts 1001 classes instead of
    # the usual 1000. The first class (index 0) is "background".
    config.num_labels = 1001
    filename = "imagenet-1k-id2label.json"
    repo_id = "huggingface/label-files"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k) + 1: v for k, v in id2label.items()}
    id2label[0] = "background"
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}
    return config
def prepare_img() -> Image.Image:
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_movilevit_checkpoint(model_name, checkpoint_path, pytorch_dump_folder_path, push_to_hub=False):
    config = get_mobilenet_va_config(model_name)
    # Load 🤗 model
    model = MobileNetVaForImageClassification(config).eval()
    # Load weights from TensorFlow checkpoint
    load_tf_weights_in_mobilenet_va(model, config, checkpoint_path)
    # Check outputs on an image, prepared by MobileNetV1ImageProcessor
    image_processor = MobileNetVaImageProcessor(
        crop_size={"width": config.image_size, "height": config.image_size}, size={"shortest_edge": config.image_size + 32}, )
    encoding = image_processor(images=prepare_img(), return_tensors="pt")
    outputs = model(**encoding)
    logits = outputs.logits
    assert logits.shape == (1, 1001)
    if model_name == "mobilenet_v1_1.0_224":
        expected_logits = torch.tensor([-4.1739, -1.1233, 3.1205])
    elif model_name == "mobilenet_v1_0.75_192":
        expected_logits = torch.tensor([-3.9440, -2.3141, -0.3333])
    else:
        expected_logits = None
    if expected_logits is not None:
        assert torch.allclose(logits[0, :3], expected_logits, atol=1e-4)
    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f'''Saving model {model_name} to {pytorch_dump_folder_path}''')
    model.save_pretrained(pytorch_dump_folder_path)
    print(f'''Saving image processor to {pytorch_dump_folder_path}''')
    image_processor.save_pretrained(pytorch_dump_folder_path)
    if push_to_hub:
        print("Pushing to the hub...")
        repo_id = "google/" + model_name
        image_processor.push_to_hub(repo_id)
        model.push_to_hub(repo_id)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--model_name""",
default="""mobilenet_v1_1.0_224""",
type=str,
help="""Name of the MobileNetV1 model you'd like to convert. Should in the form 'mobilenet_v1_<depth>_<size>'.""",
)
parser.add_argument(
"""--checkpoint_path""", required=True, type=str, help="""Path to the original TensorFlow checkpoint (.ckpt file)."""
)
parser.add_argument(
"""--pytorch_dump_folder_path""", required=True, type=str, help="""Path to the output PyTorch model directory."""
)
parser.add_argument(
"""--push_to_hub""", action="""store_true""", help="""Whether or not to push the converted model to the 🤗 hub."""
)
args = parser.parse_args()
convert_movilevit_checkpoint(
args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub
)
| 286 | from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
"""configuration_squeezebert""": [
"""SQUEEZEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""SqueezeBertConfig""",
"""SqueezeBertOnnxConfig""",
],
"""tokenization_squeezebert""": ["""SqueezeBertTokenizer"""],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a_ = ["""SqueezeBertTokenizerFast"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure["modeling_squeezebert"] = [
"""SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""SqueezeBertForMaskedLM""",
"""SqueezeBertForMultipleChoice""",
"""SqueezeBertForQuestionAnswering""",
"""SqueezeBertForSequenceClassification""",
"""SqueezeBertForTokenClassification""",
"""SqueezeBertModel""",
"""SqueezeBertModule""",
"""SqueezeBertPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_squeezebert import (
SQUEEZEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
SqueezeBertConfig,
SqueezeBertOnnxConfig,
)
from .tokenization_squeezebert import SqueezeBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_squeezebert_fast import SqueezeBertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_squeezebert import (
SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
SqueezeBertForMaskedLM,
SqueezeBertForMultipleChoice,
SqueezeBertForQuestionAnswering,
SqueezeBertForSequenceClassification,
SqueezeBertForTokenClassification,
SqueezeBertModel,
SqueezeBertModule,
SqueezeBertPreTrainedModel,
)
else:
import sys
sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
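# A stripped-down sketch of the lazy-import idea used above: attribute access
# triggers the real submodule import, so importing the package stays cheap.
# Names here are illustrative, not the actual transformers internals.
import importlib
import types


class LazyModule(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        # map each exported symbol to the submodule that defines it
        self._symbol_to_module = {
            symbol: submodule
            for submodule, symbols in import_structure.items()
            for symbol in symbols
        }

    def __getattr__(self, symbol):
        submodule = self._symbol_to_module[symbol]
        module = importlib.import_module(f"{self.__name__}.{submodule}")
        return getattr(module, symbol)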
| 286 | 1 |
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_fnet import FNetTokenizer
else:
    FNetTokenizer = None
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {"vocab_file": "spiece.model", "tokenizer_file": "tokenizer.json"}
PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"google/fnet-base": "https://huggingface.co/google/fnet-base/resolve/main/spiece.model",
"google/fnet-large": "https://huggingface.co/google/fnet-large/resolve/main/spiece.model",
},
"tokenizer_file": {
"google/fnet-base": "https://huggingface.co/google/fnet-base/resolve/main/tokenizer.json",
"google/fnet-large": "https://huggingface.co/google/fnet-large/resolve/main/tokenizer.json",
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"google/fnet-base": 512,
"google/fnet-large": 512,
}
lowerCamelCase__ = "▁"
class FNetTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "token_type_ids"]
    slow_tokenizer_class = FNetTokenizer
    def __init__(self, vocab_file=None, tokenizer_file=None, do_lower_case=False, remove_space=True, keep_accents=True, unk_token="<unk>", sep_token="[SEP]", pad_token="<pad>", cls_token="[CLS]", mask_token="[MASK]", **kwargs, ):
        # Mask token behaves like a normal word, i.e. include the space before it.
        mask_token = (
            AddedToken(mask_token, lstrip=True, rstrip=False, normalized=False)
            if isinstance(mask_token, str)
            else mask_token
        )
        super().__init__(
            vocab_file, tokenizer_file=tokenizer_file, do_lower_case=do_lower_case, remove_space=remove_space, keep_accents=keep_accents, unk_token=unk_token, sep_token=sep_token, pad_token=pad_token, cls_token=cls_token, mask_token=mask_token, **kwargs, )
        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.keep_accents = keep_accents
        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True
    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return cls + token_ids_0 + sep
        return cls + token_ids_0 + sep + token_ids_1 + sep
    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f'Vocabulary path ({save_directory}) should be a directory')
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""])
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        return (out_vocab_file,)
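# Illustrative: encoding a sentence pair shows the [CLS]/[SEP] layout and the
# segment ids produced by the two methods above (checkpoint assumed available).
from transformers import FNetTokenizerFast

tok = FNetTokenizerFast.from_pretrained("google/fnet-base")
enc = tok("hello world", "how are you")
print(enc["input_ids"])       # [CLS] tokens_a [SEP] tokens_b [SEP]
print(enc["token_type_ids"])  # 0s over the first segment, 1s over the second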
| 612 |
import warnings
from .generation import TFGenerationMixin
class TFGenerationMixin(TFGenerationMixin):
    # warning at import time
    warnings.warn(
        "Importing `TFGenerationMixin` from `src/transformers/generation_tf_utils.py` is deprecated and will "
        "be removed in Transformers v5. Import as `from transformers import TFGenerationMixin` instead.",
        FutureWarning,
    )
| 612 | 1 |
'''simple docstring'''
from __future__ import annotations
def carrier_concentration(electron_conc: float, hole_conc: float, intrinsic_conc: float, ) -> tuple:
    """
    Solve the mass action law n * p = n_i**2 for whichever of the three
    concentrations is passed as 0.

    >>> carrier_concentration(electron_conc=25, hole_conc=100, intrinsic_conc=0)
    ('intrinsic_conc', 50.0)
    >>> carrier_concentration(electron_conc=0, hole_conc=1600, intrinsic_conc=200)
    ('electron_conc', 25.0)
    """
    # Exactly one of the three concentrations must be zero (the unknown one).
    if (electron_conc, hole_conc, intrinsic_conc).count(0) != 1:
        raise ValueError('You cannot supply more or less than 2 values')
elif electron_conc < 0:
raise ValueError('Electron concentration cannot be negative in a semiconductor')
elif hole_conc < 0:
raise ValueError('Hole concentration cannot be negative in a semiconductor')
elif intrinsic_conc < 0:
raise ValueError(
'Intrinsic concentration cannot be negative in a semiconductor')
elif electron_conc == 0:
return (
"electron_conc",
intrinsic_conc**2 / hole_conc,
)
elif hole_conc == 0:
return (
"hole_conc",
intrinsic_conc**2 / electron_conc,
)
elif intrinsic_conc == 0:
return (
"intrinsic_conc",
(electron_conc * hole_conc) ** 0.5,
)
else:
return (-1, -1)
if __name__ == "__main__":
import doctest
doctest.testmod()
| 713 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
'configuration_whisper': ['WHISPER_PRETRAINED_CONFIG_ARCHIVE_MAP', 'WhisperConfig', 'WhisperOnnxConfig'],
'feature_extraction_whisper': ['WhisperFeatureExtractor'],
'processing_whisper': ['WhisperProcessor'],
'tokenization_whisper': ['WhisperTokenizer'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_whisper_fast"] = ['WhisperTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_whisper"] = [
'WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST',
'WhisperForConditionalGeneration',
'WhisperModel',
'WhisperPreTrainedModel',
'WhisperForAudioClassification',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_whisper"] = [
'TF_WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFWhisperForConditionalGeneration',
'TFWhisperModel',
'TFWhisperPreTrainedModel',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_whisper"] = [
'FlaxWhisperForConditionalGeneration',
'FlaxWhisperModel',
'FlaxWhisperPreTrainedModel',
'FlaxWhisperForAudioClassification',
]
if TYPE_CHECKING:
from .configuration_whisper import WHISPER_PRETRAINED_CONFIG_ARCHIVE_MAP, WhisperConfig, WhisperOnnxConfig
from .feature_extraction_whisper import WhisperFeatureExtractor
from .processing_whisper import WhisperProcessor
from .tokenization_whisper import WhisperTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_whisper_fast import WhisperTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_whisper import (
WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST,
WhisperForAudioClassification,
WhisperForConditionalGeneration,
WhisperModel,
WhisperPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_whisper import (
TF_WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST,
TFWhisperForConditionalGeneration,
TFWhisperModel,
TFWhisperPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_whisper import (
FlaxWhisperForAudioClassification,
FlaxWhisperForConditionalGeneration,
FlaxWhisperModel,
FlaxWhisperPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
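# Illustrative use of the classes exported above for speech transcription;
# `audio` is assumed to be a 1-D float array sampled at 16 kHz.
from transformers import WhisperForConditionalGeneration, WhisperProcessor

processor = WhisperProcessor.from_pretrained("openai/whisper-tiny")
model = WhisperForConditionalGeneration.from_pretrained("openai/whisper-tiny")

inputs = processor(audio, sampling_rate=16_000, return_tensors="pt")
generated_ids = model.generate(inputs.input_features)
print(processor.batch_decode(generated_ids, skip_special_tokens=True))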
| 432 | 0 |
import argparse
import intel_extension_for_pytorch as ipex
import torch
from diffusers import DPMSolverMultistepScheduler, StableDiffusionPipeline
parser = argparse.ArgumentParser('Stable Diffusion script with intel optimization', add_help=False)
parser.add_argument('--dpm', action='store_true', help='Enable DPMSolver or not')
parser.add_argument('--steps', default=None, type=int, help='Num inference steps')
args = parser.parse_args()
device = 'cpu'
prompt = 'a lovely <dicoo> in red dress and hat, in the snowy and bright night, with many brightly lit buildings'
model_id = 'path-to-your-trained-model'
pipe = StableDiffusionPipeline.from_pretrained(model_id)
if args.dpm:
    pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
pipe = pipe.to(device)
# to channels last
pipe.unet = pipe.unet.to(memory_format=torch.channels_last)
pipe.vae = pipe.vae.to(memory_format=torch.channels_last)
pipe.text_encoder = pipe.text_encoder.to(memory_format=torch.channels_last)
if pipe.requires_safety_checker:
    pipe.safety_checker = pipe.safety_checker.to(memory_format=torch.channels_last)
# optimize with ipex
sample = torch.randn(2, 4, 64, 64)
timestep = torch.rand(1) * 999
encoder_hidden_states = torch.randn(2, 77, 768)
input_example = (sample, timestep, encoder_hidden_states)
try:
    pipe.unet = ipex.optimize(pipe.unet.eval(), dtype=torch.bfloat16, inplace=True, sample_input=input_example)
except Exception:
    pipe.unet = ipex.optimize(pipe.unet.eval(), dtype=torch.bfloat16, inplace=True)
pipe.vae = ipex.optimize(pipe.vae.eval(), dtype=torch.bfloat16, inplace=True)
pipe.text_encoder = ipex.optimize(pipe.text_encoder.eval(), dtype=torch.bfloat16, inplace=True)
if pipe.requires_safety_checker:
    pipe.safety_checker = ipex.optimize(pipe.safety_checker.eval(), dtype=torch.bfloat16, inplace=True)
# compute
seed = 666
generator = torch.Generator(device).manual_seed(seed)
generate_kwargs = {'generator': generator}
if args.steps is not None:
    generate_kwargs['num_inference_steps'] = args.steps
with torch.cpu.amp.autocast(enabled=True, dtype=torch.bfloat16):
    image = pipe(prompt, **generate_kwargs).images[0]
# save image
image.save('generated.png')
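# Optional: a quick wall-clock check around the bf16 generation (illustrative).
import time

start = time.time()
with torch.cpu.amp.autocast(enabled=True, dtype=torch.bfloat16):
    image = pipe(prompt, **generate_kwargs).images[0]
print(f"bf16 generation took {time.time() - start:.1f}s")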
| 417 |
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)
SPIECE_UNDERLINE = '▁'
VOCAB_FILES_NAMES = {'vocab_file': 'spiece.model'}
PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file': {
'google/reformer-crime-and-punishment': (
'https://huggingface.co/google/reformer-crime-and-punishment/resolve/main/spiece.model'
)
}
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'google/reformer-crime-and-punishment': 524_288,
}
class ReformerTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ['input_ids', 'attention_mask']
    def __init__(self, vocab_file, eos_token="</s>", unk_token="<unk>", additional_special_tokens=[], sp_model_kwargs: Optional[Dict[str, Any]] = None, **kwargs, ) -> None:
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
        super().__init__(
            eos_token=eos_token, unk_token=unk_token, additional_special_tokens=additional_special_tokens, sp_model_kwargs=self.sp_model_kwargs, **kwargs, )
        self.vocab_file = vocab_file
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(vocab_file)
@property
def SCREAMING_SNAKE_CASE__ ( self : Tuple ) -> Optional[Any]:
"""simple docstring"""
return self.sp_model.get_piece_size()
def SCREAMING_SNAKE_CASE__ ( self : Optional[int] ) -> Dict[str, int]:
"""simple docstring"""
UpperCamelCase_ : List[str] = {self.convert_ids_to_tokens(snake_case ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def __getstate__( self : Tuple ) -> List[str]:
"""simple docstring"""
UpperCamelCase_ : Dict = self.__dict__.copy()
UpperCamelCase_ : Any = None
return state
def __setstate__( self : Optional[Any] , snake_case : Any ) -> Dict:
"""simple docstring"""
UpperCamelCase_ : Dict = d
# for backward compatibility
if not hasattr(self , 'sp_model_kwargs' ):
UpperCamelCase_ : Optional[int] = {}
UpperCamelCase_ : List[Any] = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
def SCREAMING_SNAKE_CASE__ ( self : Tuple , snake_case : str ) -> List[str]:
"""simple docstring"""
return self.sp_model.encode(snake_case , out_type=snake_case )
def SCREAMING_SNAKE_CASE__ ( self : str , snake_case : Optional[int] ) -> int:
"""simple docstring"""
return self.sp_model.piece_to_id(snake_case )
def SCREAMING_SNAKE_CASE__ ( self : Any , snake_case : Union[str, Any] ) -> str:
"""simple docstring"""
if index < self.sp_model.get_piece_size():
UpperCamelCase_ : Tuple = self.sp_model.IdToPiece(snake_case )
return token
def SCREAMING_SNAKE_CASE__ ( self : Tuple , snake_case : List[Any] ) -> List[Any]:
"""simple docstring"""
UpperCamelCase_ : Any = []
UpperCamelCase_ : Tuple = ''
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
out_string += self.sp_model.decode(snake_case ) + token
UpperCamelCase_ : int = []
else:
current_sub_tokens.append(snake_case )
out_string += self.sp_model.decode(snake_case )
return out_string.strip()
def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] , snake_case : str , snake_case : Optional[str] = None ) -> Tuple[str]:
"""simple docstring"""
if not os.path.isdir(snake_case ):
logger.error(f"Vocabulary path ({save_directory}) should be a directory" )
return
UpperCamelCase_ : Union[str, Any] = os.path.join(
snake_case , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(snake_case ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , snake_case )
elif not os.path.isfile(self.vocab_file ):
with open(snake_case , 'wb' ) as fi:
UpperCamelCase_ : str = self.sp_model.serialized_model_proto()
fi.write(snake_case )
return (out_vocab_file,)
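# Usage sketch (illustrative; downloads the pretrained SentencePiece model):
#   tokenizer = ReformerTokenizer.from_pretrained("google/reformer-crime-and-punishment")
#   ids = tokenizer("Crime and Punishment").input_ids
#   text = tokenizer.convert_tokens_to_string(tokenizer.convert_ids_to_tokens(ids))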
| 417 | 1 |
import colorsys
from PIL import Image # type: ignore
def get_distance(x: float, y: float, max_step: int) -> float:
    """Return the relative distance (= step/max_step) after which the complex
    number made of this x-y pair diverges. Members of the Mandelbrot set do not
    diverge, so their distance is 1."""
    a = x
    b = y
    for step in range(max_step):  # noqa: B007
        a_new = a * a - b * b + x
        b = 2 * a * b + y
        a = a_new
        # divergence happens for all complex number with an absolute value
        # greater than 4
        if a * a + b * b > 4:
            break
    return step / (max_step - 1)


def get_black_and_white_rgb(distance: float) -> tuple:
    """Black-and-white coloring: points in the set (distance == 1) are black."""
    if distance == 1:
        return (0, 0, 0)
    else:
        return (255, 255, 255)


def get_color_coded_rgb(distance: float) -> tuple:
    """Color-coded coloring: the hue encodes the relative distance."""
    if distance == 1:
        return (0, 0, 0)
    else:
        return tuple(round(i * 255) for i in colorsys.hsv_to_rgb(distance, 1, 1))


def get_image(
    image_width: int = 800,
    image_height: int = 600,
    figure_center_x: float = -0.6,
    figure_center_y: float = 0,
    figure_width: float = 3.2,
    max_step: int = 50,
    use_distance_color_coding: bool = True,
) -> Image.Image:
    """Render the Mandelbrot set as a PIL image."""
    img = Image.new("RGB", (image_width, image_height))
    pixels = img.load()

    figure_height = figure_width / image_width * image_height

    # loop through the image-coordinates
    for image_x in range(image_width):
        for image_y in range(image_height):
            # determine the figure-coordinates based on the image-coordinates
            figure_x = figure_center_x + (image_x / image_width - 0.5) * figure_width
            figure_y = figure_center_y + (image_y / image_height - 0.5) * figure_height

            distance = get_distance(figure_x, figure_y, max_step)

            # color the corresponding pixel based on the selected coloring-function
            if use_distance_color_coding:
                pixels[image_x, image_y] = get_color_coded_rgb(distance)
            else:
                pixels[image_x, image_y] = get_black_and_white_rgb(distance)

    return img
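# Worked examples (exact values follow from the escape-time loop above):
#   get_distance(0.0, 0.0, max_step=50)  ->  1.0   (the origin never diverges: in the set)
#   get_distance(2.0, 2.0, max_step=50)  ->  0.0   (diverges at step 0: far outside the set)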
if __name__ == "__main__":
import doctest
doctest.testmod()
# colored version, full figure
img = get_image()
# uncomment for colored version, different section, zoomed in
# img = get_image(figure_center_x = -0.6, figure_center_y = -0.4,
# figure_width = 0.8)
# uncomment for black and white version, full figure
# img = get_image(use_distance_color_coding = False)
# uncomment to save the image
# img.save("mandelbrot.png")
img.show()
| 717 |
import argparse
import json
from dataclasses import dataclass, field
from functools import partial
from pathlib import Path
from typing import List
import timm
import torch
import torch.nn as nn
from huggingface_hub import hf_hub_download
from torch import Tensor
from transformers import AutoImageProcessor, ResNetConfig, ResNetForImageClassification
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger()
@dataclass
class Tracker:
    module: nn.Module
    traced: List[nn.Module] = field(default_factory=list)
    handles: list = field(default_factory=list)

    def _forward_hook(self, m, inputs: Tensor, outputs: Tensor):
        # record only leaf modules (and conv/batchnorm layers explicitly)
        has_not_submodules = len(list(m.modules())) == 1 or isinstance(m, nn.Conv2d) or isinstance(m, nn.BatchNorm2d)
        if has_not_submodules:
            self.traced.append(m)

    def __call__(self, x: Tensor):
        for m in self.module.modules():
            self.handles.append(m.register_forward_hook(self._forward_hook))
        self.module(x)
        [x.remove() for x in self.handles]
        return self

    @property
    def parametrized(self):
        # check the len of the state_dict keys to see if we have learnable params
        return list(filter(lambda x: len(list(x.state_dict().keys())) > 0, self.traced))
@dataclass
class ModuleTransfer:
    src: nn.Module
    dest: nn.Module
    verbose: int = 0
    src_skip: List = field(default_factory=list)
    dest_skip: List = field(default_factory=list)

    def __call__(self, x: Tensor):
        """Transfer the weights of `self.src` to `self.dest` by performing a
        forward pass with `x` as input and comparing the traced operations."""
        dest_traced = Tracker(self.dest)(x).parametrized
        src_traced = Tracker(self.src)(x).parametrized

        src_traced = list(filter(lambda m: type(m) not in self.src_skip, src_traced))
        dest_traced = list(filter(lambda m: type(m) not in self.dest_skip, dest_traced))

        if len(dest_traced) != len(src_traced):
            raise Exception(
                f"Numbers of operations are different. Source module has {len(src_traced)} operations while"
                f" destination module has {len(dest_traced)}."
            )

        for dest_m, src_m in zip(dest_traced, src_traced):
            dest_m.load_state_dict(src_m.state_dict())
            if self.verbose == 1:
                print(f"Transfered from={src_m} to={dest_m}")
def convert_weight_and_push(name: str, config: ResNetConfig, save_directory: Path, push_to_hub: bool = True):
    print(f"Converting {name}...")
    with torch.no_grad():
        from_model = timm.create_model(name, pretrained=True).eval()
        our_model = ResNetForImageClassification(config).eval()
        module_transfer = ModuleTransfer(src=from_model, dest=our_model)
        x = torch.randn((1, 3, 224, 224))
        module_transfer(x)

    assert torch.allclose(from_model(x), our_model(x).logits), "The model logits don't match the original one."

    checkpoint_name = f"resnet{'-'.join(name.split('resnet'))}"
    print(checkpoint_name)

    if push_to_hub:
        our_model.push_to_hub(
            repo_path_or_name=save_directory / checkpoint_name,
            commit_message="Add model",
            use_temp_dir=True,
        )
        # we can use the convnext one
        image_processor = AutoImageProcessor.from_pretrained("facebook/convnext-base-224-22k-1k")
        image_processor.push_to_hub(
            repo_path_or_name=save_directory / checkpoint_name,
            commit_message="Add image processor",
            use_temp_dir=True,
        )
        print(f"Pushed {checkpoint_name}")
def convert_weights_and_push(save_directory: Path, model_name: str = None, push_to_hub: bool = True):
    filename = "imagenet-1k-id2label.json"
    num_labels = 1000
    expected_shape = (1, num_labels)

    repo_id = "huggingface/label-files"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    label2id = {v: k for k, v in id2label.items()}

    ImageNetPreTrainedConfig = partial(ResNetConfig, num_labels=num_labels, id2label=id2label, label2id=label2id)

    names_to_config = {
        "resnet18": ImageNetPreTrainedConfig(
            depths=[2, 2, 2, 2], hidden_sizes=[64, 128, 256, 512], layer_type="basic"
        ),
        "resnet26": ImageNetPreTrainedConfig(
            depths=[2, 2, 2, 2], hidden_sizes=[256, 512, 1024, 2048], layer_type="bottleneck"
        ),
        "resnet34": ImageNetPreTrainedConfig(
            depths=[3, 4, 6, 3], hidden_sizes=[64, 128, 256, 512], layer_type="basic"
        ),
        "resnet50": ImageNetPreTrainedConfig(
            depths=[3, 4, 6, 3], hidden_sizes=[256, 512, 1024, 2048], layer_type="bottleneck"
        ),
        "resnet101": ImageNetPreTrainedConfig(
            depths=[3, 4, 23, 3], hidden_sizes=[256, 512, 1024, 2048], layer_type="bottleneck"
        ),
        "resnet152": ImageNetPreTrainedConfig(
            depths=[3, 8, 36, 3], hidden_sizes=[256, 512, 1024, 2048], layer_type="bottleneck"
        ),
    }

    if model_name:
        convert_weight_and_push(model_name, names_to_config[model_name], save_directory, push_to_hub)
    else:
        for model_name, config in names_to_config.items():
            convert_weight_and_push(model_name, config, save_directory, push_to_hub)
    return config, expected_shape
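# Usage sketch (the script file name is assumed for illustration):
#   python convert_resnet_to_pytorch.py --model_name resnet50 --pytorch_dump_folder_path ./converted
# Omitting --model_name converts (and optionally pushes) every architecture in names_to_config.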
if __name__ == "__main__":
parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--model_name",
default=None,
type=str,
help=(
"The name of the model you wish to convert, it must be one of the supported resnet* architecture,"
" currently: resnet18,26,34,50,101,152. If `None`, all of them will the converted."
),
)
parser.add_argument(
"--pytorch_dump_folder_path",
default=None,
type=Path,
required=True,
help="Path to the output PyTorch model directory.",
)
parser.add_argument(
"--push_to_hub",
default=True,
type=bool,
required=False,
help="If True, push model and image processor to the hub.",
)
args = parser.parse_args()
pytorch_dump_folder_path: Path = args.pytorch_dump_folder_path
pytorch_dump_folder_path.mkdir(exist_ok=True, parents=True)
convert_weights_and_push(pytorch_dump_folder_path, args.model_name, args.push_to_hub)
| 202 | 0 |
import argparse
import json
from tqdm import tqdm
def main() -> None:
    parser = argparse.ArgumentParser()

    # Required parameters
    parser.add_argument(
        "--src_path", type=str, default="biencoder-nq-dev.json", help="Path to raw DPR training data",
    )
    parser.add_argument(
        "--evaluation_set", type=str, help="where to store parsed evaluation_set file",
    )
    parser.add_argument(
        "--gold_data_path", type=str, help="where to store parsed gold_data_path file",
    )
    args = parser.parse_args()

    with open(args.src_path, "r") as src_file, open(args.evaluation_set, "w") as eval_file, open(
        args.gold_data_path, "w"
    ) as gold_file:
        dpr_records = json.load(src_file)
        for dpr_record in tqdm(dpr_records):
            question = dpr_record["question"]
            contexts = [context["title"] for context in dpr_record["positive_ctxs"]]
            eval_file.write(question + "\n")
            gold_file.write("\t".join(contexts) + "\n")
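# Usage sketch (the script file name is assumed for illustration):
#   python parse_dpr_relevance_data.py --src_path biencoder-nq-dev.json \
#       --evaluation_set eval.questions --gold_data_path eval.gold
# Writes one question per line to the evaluation set, and the tab-joined titles
# of the positive contexts per line to the gold data file.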
if __name__ == "__main__":
main()
| 435 |
import collections
import os
import re
from pathlib import Path
PATH_TO_TRANSFORMERS = "src/transformers"


# Matches is_xxx_available()
_re_backend = re.compile(r"is\_([a-z_]*)_available()")
# Catches a one-line _import_struct = {xxx}
_re_one_line_import_struct = re.compile(r"^_import_structure\s+=\s+\{([^\}]+)\}")
# Catches a line with a key-values pattern: "bla": ["foo", "bar"]
_re_import_struct_key_value = re.compile(r"\s+\"\S*\":\s+\[([^\]]*)\]")
# Catches a line if not is_foo_available
_re_test_backend = re.compile(r"^\s*if\s+not\s+is\_[a-z_]*\_available\(\)")
# Catches a line _import_struct["bla"].append("foo")
_re_import_struct_add_one = re.compile(r"^\s*_import_structure\[\"\S*\"\]\.append\(\"(\S*)\"\)")
# Catches a line _import_struct["bla"].extend(["foo", "bar"]) or _import_struct["bla"] = ["foo", "bar"]
_re_import_struct_add_many = re.compile(r"^\s*_import_structure\[\S*\](?:\.extend\(|\s*=\s+)\[([^\]]*)\]")
# Catches a line with an object between quotes and a comma: "MyModel",
_re_quote_object = re.compile(r"^\s+\"([^\"]+)\",")
# Catches a line with objects between brackets only: ["foo", "bar"],
_re_between_brackets = re.compile(r"^\s+\[([^\]]+)\]")
# Catches a line with from foo import bar, bla, boo
_re_import = re.compile(r"\s+from\s+\S*\s+import\s+([^\(\s].*)\n")
# Catches a line with try:
_re_try = re.compile(r"^\s*try:")
# Catches a line with else:
_re_else = re.compile(r"^\s*else:")
def find_backend(line):
    """Find one (or multiple) backend in a code line of the init."""
    if _re_test_backend.search(line) is None:
        return None
    backends = [b[0] for b in _re_backend.findall(line)]
    backends.sort()
    return "_and_".join(backends)
def parse_init(init_file):
    """
    Read an init file and parse (per backend) the `_import_structure` objects and the
    `TYPE_CHECKING` objects. Returns the two parsed dictionaries if the init is a
    proper (lazy) init, `None` otherwise.
    """
    with open(init_file, "r", encoding="utf-8", newline="\n") as f:
        lines = f.readlines()

    line_index = 0
    while line_index < len(lines) and not lines[line_index].startswith("_import_structure = {"):
        line_index += 1

    # If this is a traditional init, just return.
    if line_index >= len(lines):
        return None

    # First grab the objects without a specific backend in _import_structure
    objects = []
    while not lines[line_index].startswith("if TYPE_CHECKING") and find_backend(lines[line_index]) is None:
        line = lines[line_index]
        # If we have everything on a single line, let's deal with it.
        if _re_one_line_import_struct.search(line):
            content = _re_one_line_import_struct.search(line).groups()[0]
            imports = re.findall(r"\[([^\]]+)\]", content)
            for imp in imports:
                objects.extend([obj[1:-1] for obj in imp.split(", ")])
            line_index += 1
            continue
        single_line_import_search = _re_import_struct_key_value.search(line)
        if single_line_import_search is not None:
            imports = [obj[1:-1] for obj in single_line_import_search.groups()[0].split(", ") if len(obj) > 0]
            objects.extend(imports)
        elif line.startswith(" " * 8 + '"'):
            objects.append(line[9:-3])
        line_index += 1

    import_dict_objects = {"none": objects}
    # Let's continue with backend-specific objects in _import_structure
    while not lines[line_index].startswith("if TYPE_CHECKING"):
        # If the line is an if not is_backend_available, we grab all objects associated.
        backend = find_backend(lines[line_index])
        # Check if the backend declaration is inside a try block:
        if _re_try.search(lines[line_index - 1]) is None:
            backend = None

        if backend is not None:
            line_index += 1

            # Scroll until we hit the else block of try-except-else
            while _re_else.search(lines[line_index]) is None:
                line_index += 1

            line_index += 1

            objects = []
            # Until we unindent, add backend objects to the list
            while len(lines[line_index]) <= 1 or lines[line_index].startswith(" " * 4):
                line = lines[line_index]
                if _re_import_struct_add_one.search(line) is not None:
                    objects.append(_re_import_struct_add_one.search(line).groups()[0])
                elif _re_import_struct_add_many.search(line) is not None:
                    imports = _re_import_struct_add_many.search(line).groups()[0].split(", ")
                    imports = [obj[1:-1] for obj in imports if len(obj) > 0]
                    objects.extend(imports)
                elif _re_between_brackets.search(line) is not None:
                    imports = _re_between_brackets.search(line).groups()[0].split(", ")
                    imports = [obj[1:-1] for obj in imports if len(obj) > 0]
                    objects.extend(imports)
                elif _re_quote_object.search(line) is not None:
                    objects.append(_re_quote_object.search(line).groups()[0])
                elif line.startswith(" " * 8 + '"'):
                    objects.append(line[9:-3])
                elif line.startswith(" " * 12 + '"'):
                    objects.append(line[13:-3])
                line_index += 1

            import_dict_objects[backend] = objects
        else:
            line_index += 1

    # At this stage we are in the TYPE_CHECKING part, first grab the objects without a specific backend
    objects = []
    while (
        line_index < len(lines)
        and find_backend(lines[line_index]) is None
        and not lines[line_index].startswith("else")
    ):
        line = lines[line_index]
        single_line_import_search = _re_import.search(line)
        if single_line_import_search is not None:
            objects.extend(single_line_import_search.groups()[0].split(", "))
        elif line.startswith(" " * 8):
            objects.append(line[8:-2])
        line_index += 1

    type_hint_objects = {"none": objects}
    # Let's continue with backend-specific objects
    while line_index < len(lines):
        # If the line is an if is_backend_available, we grab all objects associated.
        backend = find_backend(lines[line_index])
        # Check if the backend declaration is inside a try block:
        if _re_try.search(lines[line_index - 1]) is None:
            backend = None

        if backend is not None:
            line_index += 1

            # Scroll until we hit the else block of try-except-else
            while _re_else.search(lines[line_index]) is None:
                line_index += 1

            line_index += 1

            objects = []
            # Until we unindent, add backend objects to the list
            while len(lines[line_index]) <= 1 or lines[line_index].startswith(" " * 8):
                line = lines[line_index]
                single_line_import_search = _re_import.search(line)
                if single_line_import_search is not None:
                    objects.extend(single_line_import_search.groups()[0].split(", "))
                elif line.startswith(" " * 12):
                    objects.append(line[12:-2])
                line_index += 1

            type_hint_objects[backend] = objects
        else:
            line_index += 1

    return import_dict_objects, type_hint_objects
def analyze_results(import_dict_objects, type_hint_objects):
    """Analyze the differences between _import_structure objects and TYPE_CHECKING objects found in an init."""

    def find_duplicates(seq):
        return [k for k, v in collections.Counter(seq).items() if v > 1]

    if list(import_dict_objects.keys()) != list(type_hint_objects.keys()):
        return ["Both sides of the init do not have the same backends!"]

    errors = []
    for key in import_dict_objects.keys():
        duplicate_imports = find_duplicates(import_dict_objects[key])
        if duplicate_imports:
            errors.append(f"Duplicate _import_structure definitions for: {duplicate_imports}")
        duplicate_type_hints = find_duplicates(type_hint_objects[key])
        if duplicate_type_hints:
            errors.append(f"Duplicate TYPE_CHECKING objects for: {duplicate_type_hints}")

        if sorted(set(import_dict_objects[key])) != sorted(set(type_hint_objects[key])):
            name = "base imports" if key == "none" else f"{key} backend"
            errors.append(f"Differences for {name}:")
            for a in type_hint_objects[key]:
                if a not in import_dict_objects[key]:
                    errors.append(f"  {a} in TYPE_HINT but not in _import_structure.")
            for a in import_dict_objects[key]:
                if a not in type_hint_objects[key]:
                    errors.append(f"  {a} in _import_structure but not in TYPE_HINT.")
    return errors
def check_all_inits():
    """Check all inits in the transformers repo and raise an error if at least one is broken."""
    failures = []
    for root, _, files in os.walk(PATH_TO_TRANSFORMERS):
        if "__init__.py" in files:
            fname = os.path.join(root, "__init__.py")
            objects = parse_init(fname)
            if objects is not None:
                errors = analyze_results(*objects)
                if len(errors) > 0:
                    errors[0] = f"Problem in {fname}, both halves do not define the same objects.\n{errors[0]}"
                    failures.append("\n".join(errors))
    if len(failures) > 0:
        raise ValueError("\n\n".join(failures))
def get_transformers_submodules():
    """Returns the list of transformers submodules."""
    submodules = []
    for path, directories, files in os.walk(PATH_TO_TRANSFORMERS):
        for folder in directories:
            # Ignore private modules
            if folder.startswith("_"):
                directories.remove(folder)
                continue
            # Ignore leftovers from branches (empty folders apart from pycache)
            if len(list((Path(path) / folder).glob("*.py"))) == 0:
                continue
            short_path = str((Path(path) / folder).relative_to(PATH_TO_TRANSFORMERS))
            submodule = short_path.replace(os.path.sep, ".")
            submodules.append(submodule)
        for fname in files:
            if fname == "__init__.py":
                continue
            short_path = str((Path(path) / fname).relative_to(PATH_TO_TRANSFORMERS))
            submodule = short_path.replace(".py", "").replace(os.path.sep, ".")
            if len(submodule.split(".")) == 1:
                submodules.append(submodule)
    return submodules
IGNORE_SUBMODULES = [
"""convert_pytorch_checkpoint_to_tf2""",
"""modeling_flax_pytorch_utils""",
"""models.esm.openfold_utils""",
]
def check_submodules():
    """Check that every submodule of transformers is registered in the main init."""
    # This is to make sure the transformers module imported is the one in the repo.
    from transformers.utils import direct_transformers_import

    transformers = direct_transformers_import(PATH_TO_TRANSFORMERS)

    import_structure_keys = set(transformers._import_structure.keys())
    # This contains all the base keys of the _import_structure object defined in the init, but if the user is missing
    # some optional dependencies, they may not have all of them. Thus we read the init to read all additions and
    # (potentially re-) add them.
    with open(os.path.join(PATH_TO_TRANSFORMERS, "__init__.py"), "r") as f:
        init_content = f.read()
    import_structure_keys.update(set(re.findall(r"import_structure\[\"([^\"]*)\"\]", init_content)))

    module_not_registered = [
        module
        for module in get_transformers_submodules()
        if module not in IGNORE_SUBMODULES and module not in import_structure_keys
    ]
    if len(module_not_registered) > 0:
        list_of_modules = "\n".join(f"- {module}" for module in module_not_registered)
        raise ValueError(
            "The following submodules are not properly registered in the main init of Transformers:\n"
            f"{list_of_modules}\n"
            "Make sure they appear somewhere in the keys of `_import_structure` with an empty list as value."
        )
if __name__ == "__main__":
check_all_inits()
check_submodules()
| 435 | 1 |
import os
from bleurt import score # From: git+https://github.com/google-research/bleurt.git
import datasets
logger = datasets.logging.get_logger(__name__)

_CITATION = "\n@inproceedings{bleurt,\n title={BLEURT: Learning Robust Metrics for Text Generation},\n author={Thibault Sellam and Dipanjan Das and Ankur P. Parikh},\n booktitle={ACL},\n year={2020},\n url={https://arxiv.org/abs/2004.04696}\n}\n"

_DESCRIPTION = "\nBLEURT is a learnt evaluation metric for Natural Language Generation. It is built using multiple phases of transfer learning starting from a pretrained BERT model (Devlin et al. 2018)\nand then employing another pre-training phase using synthetic data. Finally it is trained on WMT human annotations. You may run BLEURT out-of-the-box or fine-tune\nit for your specific application (the latter is expected to perform better).\n\nSee the project's README at https://github.com/google-research/bleurt#readme for more information.\n"

_KWARGS_DESCRIPTION = "\nBLEURT score.\n\nArgs:\n `predictions` (list of str): prediction/candidate sentences\n `references` (list of str): reference sentences\n `checkpoint` BLEURT checkpoint. Will default to BLEURT-tiny if None.\n\nReturns:\n 'scores': List of scores.\nExamples:\n\n >>> predictions = [\"hello there\", \"general kenobi\"]\n >>> references = [\"hello there\", \"general kenobi\"]\n >>> bleurt = datasets.load_metric(\"bleurt\")\n >>> results = bleurt.compute(predictions=predictions, references=references)\n >>> print([round(v, 2) for v in results[\"scores\"]])\n [1.03, 1.04]\n"

CHECKPOINT_URLS = {
"bleurt-tiny-128": "https://storage.googleapis.com/bleurt-oss/bleurt-tiny-128.zip",
"bleurt-tiny-512": "https://storage.googleapis.com/bleurt-oss/bleurt-tiny-512.zip",
"bleurt-base-128": "https://storage.googleapis.com/bleurt-oss/bleurt-base-128.zip",
"bleurt-base-512": "https://storage.googleapis.com/bleurt-oss/bleurt-base-512.zip",
"bleurt-large-128": "https://storage.googleapis.com/bleurt-oss/bleurt-large-128.zip",
"bleurt-large-512": "https://storage.googleapis.com/bleurt-oss/bleurt-large-512.zip",
"BLEURT-20-D3": "https://storage.googleapis.com/bleurt-oss-21/BLEURT-20-D3.zip",
"BLEURT-20-D6": "https://storage.googleapis.com/bleurt-oss-21/BLEURT-20-D6.zip",
"BLEURT-20-D12": "https://storage.googleapis.com/bleurt-oss-21/BLEURT-20-D12.zip",
"BLEURT-20": "https://storage.googleapis.com/bleurt-oss-21/BLEURT-20.zip",
}
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class BLEURT(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            homepage="https://github.com/google-research/bleurt",
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("string", id="sequence"),
                    "references": datasets.Value("string", id="sequence"),
                }
            ),
            codebase_urls=["https://github.com/google-research/bleurt"],
            reference_urls=["https://github.com/google-research/bleurt", "https://arxiv.org/abs/2004.04696"],
        )

    def _download_and_prepare(self, dl_manager):
        # check that config name specifies a valid BLEURT model
        if self.config_name == "default":
            logger.warning(
                "Using default BLEURT-Base checkpoint for sequence maximum length 128. "
                "You can use a bigger model for better results with e.g.: datasets.load_metric('bleurt', 'bleurt-large-512')."
            )
            self.config_name = "bleurt-base-128"

        if self.config_name.lower() in CHECKPOINT_URLS:
            checkpoint_name = self.config_name.lower()
        elif self.config_name.upper() in CHECKPOINT_URLS:
            checkpoint_name = self.config_name.upper()
        else:
            raise KeyError(
                f"{self.config_name} model not found. You should supply the name of a model checkpoint for bleurt in {CHECKPOINT_URLS.keys()}"
            )

        # download the model checkpoint specified by self.config_name and set up the scorer
        model_path = dl_manager.download_and_extract(CHECKPOINT_URLS[checkpoint_name])
        self.scorer = score.BleurtScorer(os.path.join(model_path, checkpoint_name))

    def _compute(self, predictions, references):
        scores = self.scorer.score(references=references, candidates=predictions)
        return {"scores": scores}
| 583 |
from typing import TYPE_CHECKING
from ...file_utils import _LazyModule, is_torch_available
from ...utils import OptionalDependencyNotAvailable
_import_structure = {
"configuration_gpt_neox_japanese": ["GPT_NEOX_JAPANESE_PRETRAINED_CONFIG_ARCHIVE_MAP", "GPTNeoXJapaneseConfig"],
"tokenization_gpt_neox_japanese": ["GPTNeoXJapaneseTokenizer"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_gpt_neox_japanese"] = [
"GPT_NEOX_JAPANESE_PRETRAINED_MODEL_ARCHIVE_LIST",
"GPTNeoXJapaneseForCausalLM",
"GPTNeoXJapaneseLayer",
"GPTNeoXJapaneseModel",
"GPTNeoXJapanesePreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_gpt_neox_japanese import GPT_NEOX_JAPANESE_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTNeoXJapaneseConfig
from .tokenization_gpt_neox_japanese import GPTNeoXJapaneseTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_gpt_neox_japanese import (
GPT_NEOX_JAPANESE_PRETRAINED_MODEL_ARCHIVE_LIST,
GPTNeoXJapaneseForCausalLM,
GPTNeoXJapaneseLayer,
GPTNeoXJapaneseModel,
GPTNeoXJapanesePreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 583 | 1 |
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

XLM_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"xlm-mlm-en-2048": "https://huggingface.co/xlm-mlm-en-2048/resolve/main/config.json",
"xlm-mlm-ende-1024": "https://huggingface.co/xlm-mlm-ende-1024/resolve/main/config.json",
"xlm-mlm-enfr-1024": "https://huggingface.co/xlm-mlm-enfr-1024/resolve/main/config.json",
"xlm-mlm-enro-1024": "https://huggingface.co/xlm-mlm-enro-1024/resolve/main/config.json",
"xlm-mlm-tlm-xnli15-1024": "https://huggingface.co/xlm-mlm-tlm-xnli15-1024/resolve/main/config.json",
"xlm-mlm-xnli15-1024": "https://huggingface.co/xlm-mlm-xnli15-1024/resolve/main/config.json",
"xlm-clm-enfr-1024": "https://huggingface.co/xlm-clm-enfr-1024/resolve/main/config.json",
"xlm-clm-ende-1024": "https://huggingface.co/xlm-clm-ende-1024/resolve/main/config.json",
"xlm-mlm-17-1280": "https://huggingface.co/xlm-mlm-17-1280/resolve/main/config.json",
"xlm-mlm-100-1280": "https://huggingface.co/xlm-mlm-100-1280/resolve/main/config.json",
}
class XLMConfig(PretrainedConfig):
    """Configuration class to store the configuration of an XLM model."""

    model_type = "xlm"
    attribute_map = {
        "hidden_size": "emb_dim",
        "num_attention_heads": "n_heads",
        "num_hidden_layers": "n_layers",
        "n_words": "vocab_size",  # For backward compatibility
    }

    def __init__(
        self,
        vocab_size=30145,
        emb_dim=2048,
        n_layers=12,
        n_heads=16,
        dropout=0.1,
        attention_dropout=0.1,
        gelu_activation=True,
        sinusoidal_embeddings=False,
        causal=False,
        asm=False,
        n_langs=1,
        use_lang_emb=True,
        max_position_embeddings=512,
        embed_init_std=2048**-0.5,
        layer_norm_eps=1e-12,
        init_std=0.02,
        bos_index=0,
        eos_index=1,
        pad_index=2,
        unk_index=3,
        mask_index=5,
        is_encoder=True,
        summary_type="first",
        summary_use_proj=True,
        summary_activation=None,
        summary_proj_to_labels=True,
        summary_first_dropout=0.1,
        start_n_top=5,
        end_n_top=5,
        mask_token_id=0,
        lang_id=0,
        pad_token_id=2,
        bos_token_id=0,
        **kwargs,
    ):
        """Constructs XLMConfig."""
        self.vocab_size = vocab_size
        self.emb_dim = emb_dim
        self.n_layers = n_layers
        self.n_heads = n_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.gelu_activation = gelu_activation
        self.sinusoidal_embeddings = sinusoidal_embeddings
        self.causal = causal
        self.asm = asm
        self.n_langs = n_langs
        self.use_lang_emb = use_lang_emb
        self.layer_norm_eps = layer_norm_eps
        self.bos_index = bos_index
        self.eos_index = eos_index
        self.pad_index = pad_index
        self.unk_index = unk_index
        self.mask_index = mask_index
        self.is_encoder = is_encoder
        self.max_position_embeddings = max_position_embeddings
        self.embed_init_std = embed_init_std
        self.init_std = init_std
        self.summary_type = summary_type
        self.summary_use_proj = summary_use_proj
        self.summary_activation = summary_activation
        self.summary_proj_to_labels = summary_proj_to_labels
        self.summary_first_dropout = summary_first_dropout
        self.start_n_top = start_n_top
        self.end_n_top = end_n_top
        self.mask_token_id = mask_token_id
        self.lang_id = lang_id

        if "n_words" in kwargs:
            self.n_words = kwargs["n_words"]

        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, **kwargs)


class XLMOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
                ("token_type_ids", dynamic_axis),
            ]
        )
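# Usage sketch (illustrative):
#   config = XLMConfig()             # defaults mirror the xlm-mlm-en-2048 architecture
#   config.n_heads, config.emb_dim   # -> (16, 2048)
# The attribute_map above lets generic code read config.hidden_size and
# config.num_attention_heads even though XLM stores them as emb_dim / n_heads.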
| 56 |
import json
from typing import Dict, List, Optional, Tuple, Union
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding, EncodedInput
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import PaddingStrategy, logging
from .tokenization_led import LEDTokenizer
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
'''vocab_file''': {
'''allenai/led-base-16384''': '''https://huggingface.co/allenai/led-base-16384/resolve/main/vocab.json''',
},
'''merges_file''': {
'''allenai/led-base-16384''': '''https://huggingface.co/allenai/led-base-16384/resolve/main/merges.txt''',
},
'''tokenizer_file''': {
'''allenai/led-base-16384''': '''https://huggingface.co/allenai/led-base-16384/resolve/main/tokenizer.json''',
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'''allenai/led-base-16384''': 16384,
}
class LEDTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = LEDTokenizer
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file=None,
        merges_file=None,
        tokenizer_file=None,
        errors="replace",
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        add_prefix_space=False,
        trim_offsets=True,
        **kwargs,
    ):
        super().__init__(
            vocab_file, merges_file, tokenizer_file=tokenizer_file, errors=errors, bos_token=bos_token, eos_token=eos_token, sep_token=sep_token, cls_token=cls_token, unk_token=unk_token, pad_token=pad_token, mask_token=mask_token, add_prefix_space=add_prefix_space, trim_offsets=trim_offsets, **kwargs,
        )

        pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__())
        if pre_tok_state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
            pre_tok_class = getattr(pre_tokenizers, pre_tok_state.pop("type"))
            pre_tok_state["add_prefix_space"] = add_prefix_space
            self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state)

        self.add_prefix_space = add_prefix_space

        # the pre_tokenizer is already updated in the GPT2TokenizerFast `__init__`
        tokenizer_component = "post_processor"
        tokenizer_component_instance = getattr(self.backend_tokenizer, tokenizer_component, None)
        if tokenizer_component_instance:
            state = json.loads(tokenizer_component_instance.__getstate__())

            # The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class`
            if "sep" in state:
                state["sep"] = tuple(state["sep"])
            if "cls" in state:
                state["cls"] = tuple(state["cls"])

            changes_to_apply = False

            if state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
                state["add_prefix_space"] = add_prefix_space
                changes_to_apply = True

            if state.get("trim_offsets", trim_offsets) != trim_offsets:
                state["trim_offsets"] = trim_offsets
                changes_to_apply = True

            if changes_to_apply:
                component_class = getattr(processors, state.pop("type"))
                new_value = component_class(**state)
                setattr(self.backend_tokenizer, tokenizer_component, new_value)

    @property
    # Copied from transformers.models.bart.tokenization_bart_fast.BartTokenizerFast.mask_token with BART->LED
    def mask_token(self) -> str:
        if self._mask_token is None:
            if self.verbose:
                logger.error("Using mask_token, but it is not set yet.")
            return None
        return str(self._mask_token)

    @mask_token.setter
    def mask_token(self, value):
        # Mask token behaves like a normal word, i.e. includes the space before it
        value = AddedToken(value, lstrip=True, rstrip=False) if isinstance(value, str) else value
        self._mask_token = value

    def _batch_encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)

        if is_split_into_words and not self.add_prefix_space:
            raise ValueError(
                f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
                "to use it with pretokenized inputs."
            )

        return super()._batch_encode_plus(*args, **kwargs)

    def _encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)

        if is_split_into_words and not self.add_prefix_space:
            raise ValueError(
                f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
                "to use it with pretokenized inputs."
            )

        return super()._encode_plus(*args, **kwargs)

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.bos_token_id] + token_ids_0 + [self.eos_token_id]
        if token_ids_1 is None:
            return output

        return output + [self.eos_token_id] + token_ids_1 + [self.eos_token_id]

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    def _pad(
        self,
        encoded_inputs: Union[Dict[str, EncodedInput], BatchEncoding],
        max_length: Optional[int] = None,
        padding_strategy: PaddingStrategy = PaddingStrategy.DO_NOT_PAD,
        pad_to_multiple_of: Optional[int] = None,
        return_attention_mask: Optional[bool] = None,
    ) -> dict:
        encoded_inputs = super()._pad(
            encoded_inputs=encoded_inputs,
            max_length=max_length,
            padding_strategy=padding_strategy,
            pad_to_multiple_of=pad_to_multiple_of,
            return_attention_mask=return_attention_mask,
        )

        # Load from model defaults
        if return_attention_mask is None:
            return_attention_mask = "attention_mask" in self.model_input_names

        if return_attention_mask and "global_attention_mask" in encoded_inputs:
            required_input = encoded_inputs[self.model_input_names[0]]
            # `global_attention_mask` need to have the same length as other (sequential) inputs.
            needs_to_be_padded = len(encoded_inputs["global_attention_mask"]) != len(required_input)

            if needs_to_be_padded:
                difference = len(required_input) - len(encoded_inputs["global_attention_mask"])

                if self.padding_side == "right":
                    # Use `-1` since `0` in `global_attention_mask` means `local attention` instead of `not to attend`
                    encoded_inputs["global_attention_mask"] = (
                        encoded_inputs["global_attention_mask"] + [-1] * difference
                    )
                elif self.padding_side == "left":
                    encoded_inputs["global_attention_mask"] = [-1] * difference + encoded_inputs[
                        "global_attention_mask"
                    ]
                else:
                    raise ValueError("Invalid padding strategy:" + str(self.padding_side))

        return encoded_inputs
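# Example of the padding behavior above (illustrative): if the input ids are padded
# from length 4 to 6 and global_attention_mask was [1, 0, 0, 0], right-side padding
# extends it with -1 entries to [1, 0, 0, 0, -1, -1].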
| 236 | 0 |
"""simple docstring"""
import warnings
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class ViltProcessor(ProcessorMixin):
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "ViltImageProcessor"
    tokenizer_class = ("BertTokenizer", "BertTokenizerFast")

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")

        super().__init__(image_processor, tokenizer)
        self.current_processor = self.image_processor

    def __call__(
        self,
        images,
        text: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None,
        add_special_tokens: bool = True,
        padding: Union[bool, str, PaddingStrategy] = False,
        truncation: Union[bool, str, TruncationStrategy] = None,
        max_length: Optional[int] = None,
        stride: int = 0,
        pad_to_multiple_of: Optional[int] = None,
        return_token_type_ids: Optional[bool] = None,
        return_attention_mask: Optional[bool] = None,
        return_overflowing_tokens: bool = False,
        return_special_tokens_mask: bool = False,
        return_offsets_mapping: bool = False,
        return_length: bool = False,
        verbose: bool = True,
        return_tensors: Optional[Union[str, TensorType]] = None,
        **kwargs,
    ) -> BatchEncoding:
        encoding = self.tokenizer(
            text=text, add_special_tokens=add_special_tokens, padding=padding, truncation=truncation, max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of, return_token_type_ids=return_token_type_ids, return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping, return_length=return_length, verbose=verbose, return_tensors=return_tensors, **kwargs,
        )
        # add pixel_values + pixel_mask
        encoding_image_processor = self.image_processor(images, return_tensors=return_tensors)
        encoding.update(encoding_image_processor)

        return encoding

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))

    @property
    def feature_extractor_class(self):
        warnings.warn(
            "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.",
            FutureWarning,
        )
        return self.image_processor_class

    @property
    def feature_extractor(self):
        warnings.warn(
            "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.",
            FutureWarning,
        )
        return self.image_processor
| 708 |
"""simple docstring"""
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow
if is_torch_available():
import torch
from transformers import XLMRobertaModel
@require_sentencepiece
@require_tokenizers
@require_torch
class XLMRobertaModelIntegrationTest(unittest.TestCase):
    @slow
    def test_xlm_roberta_base(self):
        model = XLMRobertaModel.from_pretrained("xlm-roberta-base")
        input_ids = torch.tensor([[0, 581, 10269, 83, 99942, 136, 60742, 23, 70, 80583, 18276, 2]])
        # The dog is cute and lives in the garden house

        expected_output_shape = torch.Size((1, 12, 768))  # batch_size, sequence_length, embedding_vector_dim
        expected_output_values_last_dim = torch.tensor(
            [[-0.0101, 0.1218, -0.0803, 0.0801, 0.1327, 0.0776, -0.1215, 0.2383, 0.3338, 0.3106, 0.0300, 0.0252]]
        )
        # xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.base')
        # xlmr.eval()
        # expected_output_values_last_dim = xlmr.extract_features(input_ids[0])[:, :, -1]
        with torch.no_grad():
            output = model(input_ids)["last_hidden_state"].detach()
        self.assertEqual(output.shape, expected_output_shape)
        # compare the actual values for a slice of last dim
        self.assertTrue(torch.allclose(output[:, :, -1], expected_output_values_last_dim, atol=1e-3))

    @slow
    def test_xlm_roberta_large(self):
        model = XLMRobertaModel.from_pretrained("xlm-roberta-large")
        input_ids = torch.tensor([[0, 581, 10269, 83, 99942, 136, 60742, 23, 70, 80583, 18276, 2]])
        # The dog is cute and lives in the garden house

        expected_output_shape = torch.Size((1, 12, 1024))  # batch_size, sequence_length, embedding_vector_dim
        expected_output_values_last_dim = torch.tensor(
            [[-0.0699, -0.0318, 0.0705, -0.1241, 0.0999, -0.0520, 0.1004, -0.1838, -0.4704, 0.1437, 0.0821, 0.0126]]
        )
        # xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.large')
        # xlmr.eval()
        # expected_output_values_last_dim = xlmr.extract_features(input_ids[0])[:, :, -1]
        with torch.no_grad():
            output = model(input_ids)["last_hidden_state"].detach()
        self.assertEqual(output.shape, expected_output_shape)
        # compare the actual values for a slice of last dim
        self.assertTrue(torch.allclose(output[:, :, -1], expected_output_values_last_dim, atol=1e-3))
| 401 | 0 |
import argparse
import json
import os
import torch
from transformers.file_utils import has_file
from diffusers import UNet2DConditionModel, UNet2DModel

do_only_config = False
do_only_weights = True
do_only_renaming = False
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
"--repo_path",
default=None,
type=str,
required=True,
help="The config json file corresponding to the architecture.",
)
parser.add_argument("--dump_path", default=None, type=str, required=True, help="Path to the output model.")
    args = parser.parse_args()

    config_parameters_to_change = {
"image_size": "sample_size",
"num_res_blocks": "layers_per_block",
"block_channels": "block_out_channels",
"down_blocks": "down_block_types",
"up_blocks": "up_block_types",
"downscale_freq_shift": "freq_shift",
"resnet_num_groups": "norm_num_groups",
"resnet_act_fn": "act_fn",
"resnet_eps": "norm_eps",
"num_head_channels": "attention_head_dim",
}
    key_parameters_to_change = {
"time_steps": "time_proj",
"mid": "mid_block",
"downsample_blocks": "down_blocks",
"upsample_blocks": "up_blocks",
}
UpperCamelCase_ = "" if has_file(args.repo_path, "config.json") else "unet"
with open(os.path.join(args.repo_path, subfolder, "config.json"), "r", encoding="utf-8") as reader:
UpperCamelCase_ = reader.read()
UpperCamelCase_ = json.loads(text)
if do_only_config:
for key in config_parameters_to_change.keys():
config.pop(key, None)
if has_file(args.repo_path, "config.json"):
UpperCamelCase_ = UNetaDModel(**config)
else:
UpperCamelCase_ = UNetaDConditionModel if "ldm-text2im-large-256" in args.repo_path else UNetaDModel
UpperCamelCase_ = class_name(**config)
if do_only_config:
model.save_config(os.path.join(args.repo_path, subfolder))
UpperCamelCase_ = dict(model.config)
    if do_only_renaming:
        for key, value in config_parameters_to_change.items():
            if key in config:
                config[value] = config[key]
                del config[key]

        config["down_block_types"] = [k.replace("UNetRes", "") for k in config["down_block_types"]]
        config["up_block_types"] = [k.replace("UNetRes", "") for k in config["up_block_types"]]

    if do_only_weights:
        state_dict = torch.load(os.path.join(args.repo_path, subfolder, "diffusion_pytorch_model.bin"))

        new_state_dict = {}
        for param_key, param_value in state_dict.items():
            if param_key.endswith(".op.bias") or param_key.endswith(".op.weight"):
                continue
            has_changed = False
            for key, new_key in key_parameters_to_change.items():
                if not has_changed and param_key.split(".")[0] == key:
                    new_state_dict[".".join([new_key] + param_key.split(".")[1:])] = param_value
                    has_changed = True
            if not has_changed:
                new_state_dict[param_key] = param_value

        model.load_state_dict(new_state_dict)
        model.save_pretrained(os.path.join(args.repo_path, subfolder))
| 611 |
import argparse
import numpy as np
import torch
from transformers import SpeechT5HifiGan, SpeechT5HifiGanConfig, logging
logging.set_verbosity_info()
logger = logging.get_logger("transformers.models.speecht5")
def load_weights(checkpoint, hf_model, config):
    hf_model.apply_weight_norm()

    hf_model.conv_pre.weight_g.data = checkpoint["input_conv.weight_g"]
    hf_model.conv_pre.weight_v.data = checkpoint["input_conv.weight_v"]
    hf_model.conv_pre.bias.data = checkpoint["input_conv.bias"]

    for i in range(len(config.upsample_rates)):
        hf_model.upsampler[i].weight_g.data = checkpoint[f"upsamples.{i}.1.weight_g"]
        hf_model.upsampler[i].weight_v.data = checkpoint[f"upsamples.{i}.1.weight_v"]
        hf_model.upsampler[i].bias.data = checkpoint[f"upsamples.{i}.1.bias"]

    for i in range(len(config.upsample_rates) * len(config.resblock_kernel_sizes)):
        for j in range(len(config.resblock_dilation_sizes)):
            hf_model.resblocks[i].convs1[j].weight_g.data = checkpoint[f"blocks.{i}.convs1.{j}.1.weight_g"]
            hf_model.resblocks[i].convs1[j].weight_v.data = checkpoint[f"blocks.{i}.convs1.{j}.1.weight_v"]
            hf_model.resblocks[i].convs1[j].bias.data = checkpoint[f"blocks.{i}.convs1.{j}.1.bias"]

            hf_model.resblocks[i].convs2[j].weight_g.data = checkpoint[f"blocks.{i}.convs2.{j}.1.weight_g"]
            hf_model.resblocks[i].convs2[j].weight_v.data = checkpoint[f"blocks.{i}.convs2.{j}.1.weight_v"]
            hf_model.resblocks[i].convs2[j].bias.data = checkpoint[f"blocks.{i}.convs2.{j}.1.bias"]

    hf_model.conv_post.weight_g.data = checkpoint["output_conv.1.weight_g"]
    hf_model.conv_post.weight_v.data = checkpoint["output_conv.1.weight_v"]
    hf_model.conv_post.bias.data = checkpoint["output_conv.1.bias"]

    hf_model.remove_weight_norm()
@torch.no_grad()
def convert_hifigan_checkpoint(
    checkpoint_path,
    stats_path,
    pytorch_dump_folder_path,
    config_path=None,
    repo_id=None,
):
    if config_path is not None:
        config = SpeechT5HifiGanConfig.from_pretrained(config_path)
    else:
        config = SpeechT5HifiGanConfig()

    model = SpeechT5HifiGan(config)

    orig_checkpoint = torch.load(checkpoint_path)
    load_weights(orig_checkpoint["model"]["generator"], model, config)

    stats = np.load(stats_path)
    mean = stats[0].reshape(-1)
    scale = stats[1].reshape(-1)
    model.mean = torch.from_numpy(mean).float()
    model.scale = torch.from_numpy(scale).float()

    model.save_pretrained(pytorch_dump_folder_path)

    if repo_id:
        print("Pushing to the hub...")
        model.push_to_hub(repo_id)
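# Usage sketch (paths are illustrative):
#   python convert_hifigan.py --checkpoint_path generator.ckpt --stats_path stats.npy \
#       --pytorch_dump_folder_path ./speecht5_hifigan
# stats.npy is expected to hold the spectrogram mean in row 0 and the scale in
# row 1, matching the reshape(-1) calls above.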
if __name__ == "__main__":
UpperCamelCase_ = argparse.ArgumentParser()
parser.add_argument("--checkpoint_path", required=True, default=None, type=str, help="Path to original checkpoint")
parser.add_argument("--stats_path", required=True, default=None, type=str, help="Path to stats.npy file")
parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
parser.add_argument(
"--pytorch_dump_folder_path", required=True, default=None, type=str, help="Path to the output PyTorch model."
)
parser.add_argument(
"--push_to_hub", default=None, type=str, help="Where to upload the converted model on the 🤗 hub."
)
UpperCamelCase_ = parser.parse_args()
convert_hifigan_checkpoint(
args.checkpoint_path,
args.stats_path,
args.pytorch_dump_folder_path,
args.config_path,
args.push_to_hub,
)
| 611 | 1 |
import random
def random_graph(vertices_number: int, probability: float, directed: bool = False) -> dict:
    """Generate a random graph: for every pair of nodes, an edge is added with
    the given probability."""
    graph: dict = {i: [] for i in range(vertices_number)}

    # if probability is greater or equal than 1, then generate a complete graph
    if probability >= 1:
        return complete_graph(vertices_number)
    # if probability is lower or equal than 0, then return a graph without edges
    if probability <= 0:
        return graph

    # for each couple of nodes, add an edge from u to v
    # if the randomly generated number falls below the given probability
    for i in range(vertices_number):
        for j in range(i + 1, vertices_number):
            if random.random() < probability:
                graph[i].append(j)
                if not directed:
                    # if the graph is undirected, add an edge from j to i as well
                    graph[j].append(i)

    return graph


def complete_graph(vertices_number: int) -> dict:
    """Generate a complete graph with vertices_number vertices."""
    return {
        i: [j for j in range(vertices_number) if i != j] for i in range(vertices_number)
    }
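# Examples (random_graph output varies with the RNG state; shown for illustration):
#   complete_graph(3)        ->  {0: [1, 2], 1: [0, 2], 2: [0, 1]}
#   random_graph(4, 0.5)     ->  e.g. {0: [1], 1: [0, 3], 2: [], 3: [1]}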
if __name__ == "__main__":
import doctest
doctest.testmod()
| 189 |
LETTERS = "ABCDEFGHIJKLMNOPQRSTUVWXYZ"


def main() -> None:
    message = input("Enter message: ")
    key = input("Enter key [alphanumeric]: ")
    mode = input("Encrypt/Decrypt [e/d]: ")

    if mode.lower().startswith("e"):
        mode = "encrypt"
        translated = encrypt_message(key, message)
    elif mode.lower().startswith("d"):
        mode = "decrypt"
        translated = decrypt_message(key, message)

    print(f"\n{mode.title()}ed message:")
    print(translated)


def encrypt_message(key: str, message: str) -> str:
    return translate_message(key, message, "encrypt")


def decrypt_message(key: str, message: str) -> str:
    return translate_message(key, message, "decrypt")


def translate_message(key: str, message: str, mode: str) -> str:
    translated = []
    key_index = 0
    key = key.upper()

    for symbol in message:
        num = LETTERS.find(symbol.upper())
        if num != -1:
            if mode == "encrypt":
                num += LETTERS.find(key[key_index])
            elif mode == "decrypt":
                num -= LETTERS.find(key[key_index])

            num %= len(LETTERS)

            if symbol.isupper():
                translated.append(LETTERS[num])
            elif symbol.islower():
                translated.append(LETTERS[num].lower())

            key_index += 1
            if key_index == len(key):
                key_index = 0
        else:
            translated.append(symbol)

    return "".join(translated)
if __name__ == "__main__":
main()
| 189 | 1 |
import argparse
import torch
from transformers import MobileBertConfig, MobileBertForPreTraining, load_tf_weights_in_mobilebert
from transformers.utils import logging
logging.set_verbosity_info()
def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path, mobilebert_config_file, pytorch_dump_path):
    # Initialise PyTorch model
    config = MobileBertConfig.from_json_file(mobilebert_config_file)
    print(f"Building PyTorch model from configuration: {config}")
    model = MobileBertForPreTraining(config)
    # Load weights from tf checkpoint
    model = load_tf_weights_in_mobilebert(model, config, tf_checkpoint_path)
    # Save pytorch-model
    print(f"Save PyTorch model to {pytorch_dump_path}")
    torch.save(model.state_dict(), pytorch_dump_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--tf_checkpoint_path', default=None, type=str, required=True, help='Path to the TensorFlow checkpoint path.'
)
parser.add_argument(
'--mobilebert_config_file',
default=None,
type=str,
required=True,
help=(
'The config json file corresponding to the pre-trained MobileBERT model. \n'
'This specifies the model architecture.'
),
)
parser.add_argument(
'--pytorch_dump_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
    args = parser.parse_args()
convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.mobilebert_config_file, args.pytorch_dump_path) | 6 |
"""simple docstring"""
import os
from typing import Any, Callable, Dict, List, Optional, Tuple, Union
import torch
from torch import nn
from ...models.controlnet import ControlNetModel, ControlNetOutput
from ...models.modeling_utils import ModelMixin
from ...utils import logging
A : List[Any] = logging.get_logger(__name__)
class MultiControlNetModel(ModelMixin):
    r"""
    Multiple `ControlNetModel` wrapper class for Multi-ControlNet.
    """

    def __init__(self, controlnets: Union[List[ControlNetModel], Tuple[ControlNetModel]]):
        super().__init__()
        self.nets = nn.ModuleList(controlnets)

    def forward(self, sample, timestep, encoder_hidden_states, controlnet_cond, conditioning_scale, class_labels=None, timestep_cond=None, attention_mask=None, cross_attention_kwargs=None, guess_mode=False, return_dict=True):
        for i, (image, scale, controlnet) in enumerate(zip(controlnet_cond, conditioning_scale, self.nets)):
            down_samples, mid_sample = controlnet(
                sample, timestep, encoder_hidden_states, image, scale, class_labels, timestep_cond, attention_mask, cross_attention_kwargs, guess_mode, return_dict,
            )

            # merge samples
            if i == 0:
                down_block_res_samples, mid_block_res_sample = down_samples, mid_sample
            else:
                down_block_res_samples = [
                    samples_prev + samples_curr
                    for samples_prev, samples_curr in zip(down_block_res_samples, down_samples)
                ]
                mid_block_res_sample += mid_sample

        return down_block_res_samples, mid_block_res_sample

    def save_pretrained(self, save_directory, is_main_process=True, save_function=None, safe_serialization=False, variant=None):
        idx = 0
        model_path_to_save = save_directory
        for controlnet in self.nets:
            controlnet.save_pretrained(
                model_path_to_save, is_main_process=is_main_process, save_function=save_function, safe_serialization=safe_serialization, variant=variant,
            )

            idx += 1
            model_path_to_save = model_path_to_save + f"_{idx}"

    @classmethod
    def from_pretrained(cls, pretrained_model_path, **kwargs):
        idx = 0
        controlnets = []

        # load controlnet and append to list until no controlnet directory exists anymore
        # first controlnet has to be saved under `./mydirectory/controlnet` to be compliant with `DiffusionPipeline.from_pretrained`
        # second, third, ... controlnets have to be saved under `./mydirectory/controlnet_1`, `./mydirectory/controlnet_2`, ...
        model_path_to_load = pretrained_model_path
        while os.path.isdir(model_path_to_load):
            controlnet = ControlNetModel.from_pretrained(model_path_to_load, **kwargs)
            controlnets.append(controlnet)

            idx += 1
            model_path_to_load = pretrained_model_path + f"_{idx}"

        logger.info(f"{len(controlnets)} controlnets loaded from {pretrained_model_path}.")

        if len(controlnets) == 0:
            raise ValueError(
                f"No ControlNets found under {os.path.dirname(pretrained_model_path)}. Expected at least {pretrained_model_path + '_0'}."
            )

        return cls(controlnets)
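

# Editor's sketch (the checkpoint names are assumptions for illustration, and
# calling this downloads weights): two independent ControlNets combined into one
# module whose forward() sums their residual samples as implemented above.
def _demo_multi_controlnet() -> "MultiControlNetModel":
    canny = ControlNetModel.from_pretrained("lllyasviel/sd-controlnet-canny")
    openpose = ControlNetModel.from_pretrained("lllyasviel/sd-controlnet-openpose")
    return MultiControlNetModel([canny, openpose])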
| 636 | 0 |
'''simple docstring'''
import inspect
import unittest
import numpy as np
from tests.test_modeling_common import floats_tensor
from transformers import DetrConfig, MaskFormerConfig, SwinConfig, is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MaskFormerForInstanceSegmentation, MaskFormerModel
if is_vision_available():
from transformers import MaskFormerImageProcessor
if is_vision_available():
from PIL import Image
class MaskFormerModelTester:
    def __init__(self, parent, batch_size=2, is_training=True, use_auxiliary_loss=False, num_queries=10, num_channels=3, min_size=32 * 4, max_size=32 * 6, num_labels=4, mask_feature_size=32):
        self.parent = parent
        self.batch_size = batch_size
        self.is_training = is_training
        self.use_auxiliary_loss = use_auxiliary_loss
        self.num_queries = num_queries
        self.num_channels = num_channels
        self.min_size = min_size
        self.max_size = max_size
        self.num_labels = num_labels
        self.mask_feature_size = mask_feature_size
def a__( self : Tuple )-> Any:
"""simple docstring"""
UpperCAmelCase = floats_tensor([self.batch_size, self.num_channels, self.min_size, self.max_size] ).to(
snake_case_ )
UpperCAmelCase = torch.ones([self.batch_size, self.min_size, self.max_size] , device=snake_case_ )
UpperCAmelCase = (
torch.rand([self.batch_size, self.num_labels, self.min_size, self.max_size] , device=snake_case_ ) > 0.5
).float()
UpperCAmelCase = (torch.rand((self.batch_size, self.num_labels) , device=snake_case_ ) > 0.5).long()
UpperCAmelCase = self.get_config()
return config, pixel_values, pixel_mask, mask_labels, class_labels
def a__( self : List[str] )-> Optional[int]:
"""simple docstring"""
return MaskFormerConfig.from_backbone_and_decoder_configs(
backbone_config=SwinConfig(
depths=[1, 1, 1, 1] , ) , decoder_config=DetrConfig(
decoder_ffn_dim=128 , num_queries=self.num_queries , decoder_attention_heads=2 , d_model=self.mask_feature_size , ) , mask_feature_size=self.mask_feature_size , fpn_feature_size=self.mask_feature_size , num_channels=self.num_channels , num_labels=self.num_labels , )
def a__( self : Optional[Any] )-> Dict:
"""simple docstring"""
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase = self.prepare_config_and_inputs()
UpperCAmelCase = {'''pixel_values''': pixel_values, '''pixel_mask''': pixel_mask}
return config, inputs_dict
def a__( self : Tuple , lowerCAmelCase : Union[str, Any] , lowerCAmelCase : List[Any] )-> int:
"""simple docstring"""
UpperCAmelCase = output.encoder_hidden_states
UpperCAmelCase = output.pixel_decoder_hidden_states
UpperCAmelCase = output.transformer_decoder_hidden_states
self.parent.assertTrue(len(snake_case_ ) , len(config.backbone_config.depths ) )
self.parent.assertTrue(len(snake_case_ ) , len(config.backbone_config.depths ) )
self.parent.assertTrue(len(snake_case_ ) , config.decoder_config.decoder_layers )
def a__( self : Dict , lowerCAmelCase : List[Any] , lowerCAmelCase : str , lowerCAmelCase : Optional[Any] , lowerCAmelCase : Optional[Any]=False )-> Optional[Any]:
"""simple docstring"""
with torch.no_grad():
UpperCAmelCase = MaskFormerModel(config=snake_case_ )
model.to(snake_case_ )
model.eval()
UpperCAmelCase = model(pixel_values=snake_case_ , pixel_mask=snake_case_ )
UpperCAmelCase = model(snake_case_ , output_hidden_states=snake_case_ )
        # the correct shape of output.transformer_decoder_hidden_states ensures the correctness of the
        # encoder and pixel decoder
self.parent.assertEqual(
output.transformer_decoder_last_hidden_state.shape , (self.batch_size, self.num_queries, self.mask_feature_size) , )
        # let's ensure the other two hidden states exist
self.parent.assertTrue(output.pixel_decoder_last_hidden_state is not None )
self.parent.assertTrue(output.encoder_last_hidden_state is not None )
if output_hidden_states:
self.check_output_hidden_state(snake_case_ , snake_case_ )
def a__( self : List[str] , lowerCAmelCase : Any , lowerCAmelCase : Tuple , lowerCAmelCase : str , lowerCAmelCase : Any , lowerCAmelCase : List[Any] )-> str:
"""simple docstring"""
UpperCAmelCase = MaskFormerForInstanceSegmentation(config=snake_case_ )
model.to(snake_case_ )
model.eval()
def comm_check_on_output(lowerCAmelCase : Tuple ):
# let's still check that all the required stuff is there
self.parent.assertTrue(result.transformer_decoder_last_hidden_state is not None )
self.parent.assertTrue(result.pixel_decoder_last_hidden_state is not None )
self.parent.assertTrue(result.encoder_last_hidden_state is not None )
# okay, now we need to check the logits shape
# due to the encoder compression, masks have a //4 spatial size
self.parent.assertEqual(
result.masks_queries_logits.shape , (self.batch_size, self.num_queries, self.min_size // 4, self.max_size // 4) , )
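            # Worked example (editor's note): with the tester defaults above,
            # min_size = 32 * 4 = 128 and max_size = 32 * 6 = 192, so these mask
            # logits come out as (2, 10, 32, 48) for batch_size=2 and num_queries=10.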
# + 1 for null class
self.parent.assertEqual(
result.class_queries_logits.shape , (self.batch_size, self.num_queries, self.num_labels + 1) )
with torch.no_grad():
UpperCAmelCase = model(pixel_values=snake_case_ , pixel_mask=snake_case_ )
UpperCAmelCase = model(snake_case_ )
comm_check_on_output(snake_case_ )
UpperCAmelCase = model(
pixel_values=snake_case_ , pixel_mask=snake_case_ , mask_labels=snake_case_ , class_labels=snake_case_ )
comm_check_on_output(snake_case_ )
self.parent.assertTrue(result.loss is not None )
self.parent.assertEqual(result.loss.shape , torch.Size([1] ) )
@require_torch
class MaskFormerModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (MaskFormerModel, MaskFormerForInstanceSegmentation) if is_torch_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": MaskFormerModel, "image-segmentation": MaskFormerForInstanceSegmentation}
        if is_torch_available()
        else {}
    )
    is_encoder_decoder = False
    test_pruning = False
    test_head_masking = False
    test_missing_keys = False
def a__( self : Any )-> Union[str, Any]:
"""simple docstring"""
UpperCAmelCase = MaskFormerModelTester(self )
UpperCAmelCase = ConfigTester(self , config_class=snake_case_ , has_text_modality=snake_case_ )
def a__( self : Any )-> Any:
"""simple docstring"""
self.config_tester.run_common_tests()
def a__( self : Optional[Any] )-> Optional[int]:
"""simple docstring"""
UpperCAmelCase , UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.create_and_check_maskformer_model(snake_case_ , **snake_case_ , output_hidden_states=snake_case_ )
def a__( self : List[str] )-> str:
"""simple docstring"""
UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_maskformer_instance_segmentation_head_model(*snake_case_ )
@unittest.skip(reason='''MaskFormer does not use inputs_embeds''' )
def a__( self : Dict )-> Union[str, Any]:
"""simple docstring"""
pass
@unittest.skip(reason='''MaskFormer does not have a get_input_embeddings method''' )
def a__( self : Tuple )-> str:
"""simple docstring"""
pass
@unittest.skip(reason='''MaskFormer is not a generative model''' )
def a__( self : Optional[Any] )-> int:
"""simple docstring"""
pass
@unittest.skip(reason='''MaskFormer does not use token embeddings''' )
def a__( self : Any )-> int:
"""simple docstring"""
pass
@require_torch_multi_gpu
@unittest.skip(
reason='''MaskFormer has some layers using `add_module` which doesn\'t work well with `nn.DataParallel`''' )
def a__( self : Optional[int] )-> Dict:
"""simple docstring"""
pass
@unittest.skip('''Will be fixed soon by reducing the size of the model used for common tests.''' )
def a__( self : Any )-> Any:
"""simple docstring"""
pass
def a__( self : Union[str, Any] )-> List[Any]:
"""simple docstring"""
UpperCAmelCase , UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCAmelCase = model_class(snake_case_ )
UpperCAmelCase = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
UpperCAmelCase = [*signature.parameters.keys()]
UpperCAmelCase = ['''pixel_values''']
self.assertListEqual(arg_names[:1] , snake_case_ )
@slow
def a__( self : str )-> Dict:
"""simple docstring"""
for model_name in ["facebook/maskformer-swin-small-coco"]:
UpperCAmelCase = MaskFormerModel.from_pretrained(snake_case_ )
self.assertIsNotNone(snake_case_ )
def a__( self : Any )-> int:
"""simple docstring"""
UpperCAmelCase = (self.model_tester.min_size,) * 2
UpperCAmelCase = {
'''pixel_values''': torch.randn((2, 3, *size) , device=snake_case_ ),
'''mask_labels''': torch.randn((2, 10, *size) , device=snake_case_ ),
'''class_labels''': torch.zeros(2 , 10 , device=snake_case_ ).long(),
}
UpperCAmelCase = MaskFormerForInstanceSegmentation(MaskFormerConfig() ).to(snake_case_ )
UpperCAmelCase = model(**snake_case_ )
self.assertTrue(outputs.loss is not None )
def a__( self : Optional[int] )-> Dict:
"""simple docstring"""
UpperCAmelCase , UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.create_and_check_maskformer_model(snake_case_ , **snake_case_ , output_hidden_states=snake_case_ )
def a__( self : Union[str, Any] )-> List[str]:
"""simple docstring"""
UpperCAmelCase , UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCAmelCase = model_class(snake_case_ ).to(snake_case_ )
UpperCAmelCase = model(**snake_case_ , output_attentions=snake_case_ )
self.assertTrue(outputs.attentions is not None )
def a__( self : Any )-> str:
"""simple docstring"""
if not self.model_tester.is_training:
return
# only MaskFormerForInstanceSegmentation has the loss
UpperCAmelCase = self.all_model_classes[1]
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
UpperCAmelCase = model_class(snake_case_ )
model.to(snake_case_ )
model.train()
UpperCAmelCase = model(snake_case_ , mask_labels=snake_case_ , class_labels=snake_case_ ).loss
loss.backward()
def a__( self : Union[str, Any] )-> int:
"""simple docstring"""
UpperCAmelCase = self.all_model_classes[1]
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
UpperCAmelCase = True
UpperCAmelCase = True
UpperCAmelCase = model_class(snake_case_ )
model.to(snake_case_ )
model.train()
UpperCAmelCase = model(snake_case_ , mask_labels=snake_case_ , class_labels=snake_case_ )
UpperCAmelCase = outputs.encoder_hidden_states[0]
encoder_hidden_states.retain_grad()
UpperCAmelCase = outputs.pixel_decoder_hidden_states[0]
pixel_decoder_hidden_states.retain_grad()
        # we set requires_grad=True on inputs_embeds (line 2152); the original implementation doesn't
UpperCAmelCase = outputs.transformer_decoder_hidden_states[0]
transformer_decoder_hidden_states.retain_grad()
UpperCAmelCase = outputs.attentions[0]
attentions.retain_grad()
outputs.loss.backward(retain_graph=snake_case_ )
self.assertIsNotNone(encoder_hidden_states.grad )
self.assertIsNotNone(pixel_decoder_hidden_states.grad )
self.assertIsNotNone(transformer_decoder_hidden_states.grad )
self.assertIsNotNone(attentions.grad )
TOLERANCE = 1e-4


# We will verify our results on an image of cute cats
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_vision
@slow
class MaskFormerModelIntegrationTest(unittest.TestCase):
@cached_property
def a__( self : List[str] )-> Optional[Any]:
"""simple docstring"""
return (
MaskFormerImageProcessor.from_pretrained('''facebook/maskformer-swin-small-coco''' )
if is_vision_available()
else None
)
def a__( self : Tuple )-> Optional[Any]:
"""simple docstring"""
UpperCAmelCase = MaskFormerModel.from_pretrained('''facebook/maskformer-swin-small-coco''' ).to(snake_case_ )
UpperCAmelCase = self.default_image_processor
UpperCAmelCase = prepare_img()
UpperCAmelCase = image_processor(snake_case_ , return_tensors='''pt''' ).to(snake_case_ )
UpperCAmelCase = inputs['''pixel_values'''].shape
# check size is divisible by 32
self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0 )
# check size
self.assertEqual(snake_case_ , (1, 3, 800, 1088) )
with torch.no_grad():
UpperCAmelCase = model(**snake_case_ )
UpperCAmelCase = torch.tensor(
[[-0.0482, 0.9228, 0.4951], [-0.2547, 0.8017, 0.8527], [-0.0069, 0.3385, -0.0089]] ).to(snake_case_ )
self.assertTrue(
torch.allclose(
outputs.encoder_last_hidden_state[0, 0, :3, :3] , snake_case_ , atol=snake_case_ ) )
UpperCAmelCase = torch.tensor(
[[-0.8422, -0.8434, -0.9718], [-1.0144, -0.5565, -0.4195], [-1.0038, -0.4484, -0.1961]] ).to(snake_case_ )
self.assertTrue(
torch.allclose(
outputs.pixel_decoder_last_hidden_state[0, 0, :3, :3] , snake_case_ , atol=snake_case_ ) )
UpperCAmelCase = torch.tensor(
[[0.2852, -0.0159, 0.9735], [0.6254, 0.1858, 0.8529], [-0.0680, -0.4116, 1.8413]] ).to(snake_case_ )
self.assertTrue(
torch.allclose(
outputs.transformer_decoder_last_hidden_state[0, :3, :3] , snake_case_ , atol=snake_case_ ) )
def a__( self : Tuple )-> Optional[int]:
"""simple docstring"""
UpperCAmelCase = (
MaskFormerForInstanceSegmentation.from_pretrained('''facebook/maskformer-swin-small-coco''' )
.to(snake_case_ )
.eval()
)
UpperCAmelCase = self.default_image_processor
UpperCAmelCase = prepare_img()
UpperCAmelCase = image_processor(snake_case_ , return_tensors='''pt''' ).to(snake_case_ )
UpperCAmelCase = inputs['''pixel_values'''].shape
# check size is divisible by 32
self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0 )
# check size
self.assertEqual(snake_case_ , (1, 3, 800, 1088) )
with torch.no_grad():
UpperCAmelCase = model(**snake_case_ )
# masks_queries_logits
UpperCAmelCase = outputs.masks_queries_logits
self.assertEqual(
masks_queries_logits.shape , (1, model.config.decoder_config.num_queries, inputs_shape[-2] // 4, inputs_shape[-1] // 4) , )
UpperCAmelCase = [
[-1.3737124, -1.7724937, -1.9364233],
[-1.5977281, -1.9867939, -2.1523695],
[-1.5795398, -1.9269832, -2.093942],
]
UpperCAmelCase = torch.tensor(snake_case_ ).to(snake_case_ )
self.assertTrue(torch.allclose(masks_queries_logits[0, 0, :3, :3] , snake_case_ , atol=snake_case_ ) )
# class_queries_logits
UpperCAmelCase = outputs.class_queries_logits
self.assertEqual(
class_queries_logits.shape , (1, model.config.decoder_config.num_queries, model.config.num_labels + 1) )
UpperCAmelCase = torch.tensor(
[
[1.6512E00, -5.2572E00, -3.3519E00],
[3.6169E-02, -5.9025E00, -2.9313E00],
[1.0766E-04, -7.7630E00, -5.1263E00],
] ).to(snake_case_ )
self.assertTrue(torch.allclose(outputs.class_queries_logits[0, :3, :3] , snake_case_ , atol=snake_case_ ) )
def a__( self : int )-> List[str]:
"""simple docstring"""
UpperCAmelCase = (
MaskFormerForInstanceSegmentation.from_pretrained('''facebook/maskformer-resnet101-coco-stuff''' )
.to(snake_case_ )
.eval()
)
UpperCAmelCase = self.default_image_processor
UpperCAmelCase = prepare_img()
UpperCAmelCase = image_processor(snake_case_ , return_tensors='''pt''' ).to(snake_case_ )
UpperCAmelCase = inputs['''pixel_values'''].shape
# check size is divisible by 32
self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0 )
# check size
self.assertEqual(snake_case_ , (1, 3, 800, 1088) )
with torch.no_grad():
UpperCAmelCase = model(**snake_case_ )
# masks_queries_logits
UpperCAmelCase = outputs.masks_queries_logits
self.assertEqual(
masks_queries_logits.shape , (1, model.config.decoder_config.num_queries, inputs_shape[-2] // 4, inputs_shape[-1] // 4) , )
UpperCAmelCase = [[-0.9046, -2.6366, -4.6062], [-3.4179, -5.7890, -8.8057], [-4.9179, -7.6560, -10.7711]]
UpperCAmelCase = torch.tensor(snake_case_ ).to(snake_case_ )
self.assertTrue(torch.allclose(masks_queries_logits[0, 0, :3, :3] , snake_case_ , atol=snake_case_ ) )
# class_queries_logits
UpperCAmelCase = outputs.class_queries_logits
self.assertEqual(
class_queries_logits.shape , (1, model.config.decoder_config.num_queries, model.config.num_labels + 1) )
UpperCAmelCase = torch.tensor(
[[4.7188, -3.2585, -2.8857], [6.6871, -2.9181, -1.2487], [7.2449, -2.2764, -2.1874]] ).to(snake_case_ )
self.assertTrue(torch.allclose(outputs.class_queries_logits[0, :3, :3] , snake_case_ , atol=snake_case_ ) )
def a__( self : Dict )-> Any:
"""simple docstring"""
UpperCAmelCase = (
MaskFormerForInstanceSegmentation.from_pretrained('''facebook/maskformer-swin-small-coco''' )
.to(snake_case_ )
.eval()
)
UpperCAmelCase = self.default_image_processor
UpperCAmelCase = image_processor(
[np.zeros((3, 800, 1333) ), np.zeros((3, 800, 1333) )] , segmentation_maps=[np.zeros((384, 384) ).astype(np.floataa ), np.zeros((384, 384) ).astype(np.floataa )] , return_tensors='''pt''' , )
UpperCAmelCase = inputs['''pixel_values'''].to(snake_case_ )
UpperCAmelCase = [el.to(snake_case_ ) for el in inputs['''mask_labels''']]
UpperCAmelCase = [el.to(snake_case_ ) for el in inputs['''class_labels''']]
with torch.no_grad():
UpperCAmelCase = model(**snake_case_ )
self.assertTrue(outputs.loss is not None )
| 720 |
'''simple docstring'''
from argparse import ArgumentParser
from . import BaseTransformersCLICommand
def download_command_factory(args):
    return DownloadCommand(args.model, args.cache_dir, args.force, args.trust_remote_code)


class DownloadCommand(BaseTransformersCLICommand):
    @staticmethod
    def register_subcommand(parser: ArgumentParser):
        download_parser = parser.add_parser("download")
        download_parser.add_argument(
            "--cache-dir", type=str, default=None, help="Path to location to store the models"
        )
        download_parser.add_argument(
            "--force", action="store_true", help="Force the model to be download even if already in cache-dir"
        )
        download_parser.add_argument(
            "--trust-remote-code", action="store_true", help="Whether or not to allow for custom models defined on the Hub in their own modeling files. Use only if you've reviewed the code as it will execute on your local machine",
        )
        download_parser.add_argument("model", type=str, help="Name of the model to download")
        download_parser.set_defaults(func=download_command_factory)

    def __init__(self, model: str, cache: str, force: bool, trust_remote_code: bool):
        self._model = model
        self._cache = cache
        self._force = force
        self._trust_remote_code = trust_remote_code

    def run(self):
"""simple docstring"""
from ..models.auto import AutoModel, AutoTokenizer
AutoModel.from_pretrained(
self._model , cache_dir=self._cache , force_download=self._force , trust_remote_code=self._trust_remote_code )
AutoTokenizer.from_pretrained(
self._model , cache_dir=self._cache , force_download=self._force , trust_remote_code=self._trust_remote_code )
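# Editor's note (illustrative): this class backs the `transformers-cli download`
# subcommand, e.g. `transformers-cli download bert-base-uncased --cache-dir ./models`.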
| 50 | 0 |
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import PoolFormerImageProcessor
class PoolFormerImageProcessingTester(unittest.TestCase):
    def __init__(self, parent, batch_size=7, num_channels=3, min_resolution=30, max_resolution=400, do_resize_and_center_crop=True, size=None, crop_pct=0.9, crop_size=None, do_normalize=True, image_mean=[0.5, 0.5, 0.5], image_std=[0.5, 0.5, 0.5]):
        size = size if size is not None else {"shortest_edge": 30}
        crop_size = crop_size if crop_size is not None else {"height": 30, "width": 30}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize_and_center_crop = do_resize_and_center_crop
        self.size = size
        self.crop_pct = crop_pct
        self.crop_size = crop_size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
    def prepare_image_processor_dict(self):
return {
"size": self.size,
"do_resize_and_center_crop": self.do_resize_and_center_crop,
"crop_pct": self.crop_pct,
"crop_size": self.crop_size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
}
@require_torch
@require_vision
class PoolFormerImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = PoolFormerImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = PoolFormerImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()
    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "do_resize_and_center_crop"))
        self.assertTrue(hasattr(image_processing, "size"))
        self.assertTrue(hasattr(image_processing, "crop_pct"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))
    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"shortest_edge": 30})
        self.assertEqual(image_processor.crop_size, {"height": 30, "width": 30})

        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42, crop_size=84)
        self.assertEqual(image_processor.size, {"shortest_edge": 42})
        self.assertEqual(image_processor.crop_size, {"height": 84, "width": 84})
    def test_batch_feature(self):
        pass
    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
| 503 |
import collections
import os
from typing import List, Optional, Tuple
from transformers.utils import is_jieba_available, requires_backends
if is_jieba_available():
import jieba
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
__SCREAMING_SNAKE_CASE : Union[str, Any] = logging.get_logger(__name__)
__SCREAMING_SNAKE_CASE : str = {'vocab_file': 'vocab.txt'}
__SCREAMING_SNAKE_CASE : Tuple = {
'vocab_file': {
'openbmb/cpm-ant-10b': 'https://huggingface.co/openbmb/cpm-ant-10b/blob/main/vocab.txt',
},
}
__SCREAMING_SNAKE_CASE : Any = {
'openbmb/cpm-ant-10b': 10_24,
}
def load_vocab(vocab_file):
    """Loads a vocabulary file into a dictionary."""
    vocab = collections.OrderedDict()
    with open(vocab_file, "r", encoding="utf-8") as reader:
        tokens = reader.readlines()
    for index, token in enumerate(tokens):
        token = token.rstrip("\n")
        vocab[token] = index
    return vocab
class WordpieceTokenizer(object):
    def __init__(self, vocab, unk_token="<unk>", max_input_chars_per_word=200):
        self.vocab = vocab
        self.unk_token = unk_token
        self.max_input_chars_per_word = max_input_chars_per_word

    def tokenize(self, token):
        chars = list(token)
        if len(chars) > self.max_input_chars_per_word:
            return [self.unk_token]

        start = 0
        sub_tokens = []
        while start < len(chars):
            end = len(chars)
            cur_substr = None
            while start < end:
                substr = "".join(chars[start:end])
                if substr in self.vocab:
                    cur_substr = substr
                    break
                end -= 1
            if cur_substr is None:
                sub_tokens.append(self.unk_token)
                start += 1
            else:
                sub_tokens.append(cur_substr)
                start = end

        return sub_tokens
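

# Editor's sketch (toy vocab, not from the original file): tokenize() is a greedy
# longest-match-first scan, so "abcd" splits into ["abc", "d"] rather than ["ab", ...].
def _demo_wordpiece() -> None:
    demo = WordpieceTokenizer(vocab={"ab": 0, "abc": 1, "d": 2}, unk_token="<unk>")
    assert demo.tokenize("abcd") == ["abc", "d"]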
class CpmAntTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    add_prefix_space = False
    def __init__(self, vocab_file, bod_token="<d>", eod_token="</d>", bos_token="<s>", eos_token="</s>", pad_token="<pad>", unk_token="<unk>", line_token="</n>", space_token="</_>", padding_side="left", **kwargs):
        requires_backends(self, ["jieba"])
        super().__init__(
            bod_token=bod_token, eod_token=eod_token, bos_token=bos_token, eos_token=eos_token, pad_token=pad_token, unk_token=unk_token, line_token=line_token, space_token=space_token, padding_side=padding_side, **kwargs,
        )
        self.bod_token = bod_token
        self.eod_token = eod_token
        self.encoder = load_vocab(vocab_file)
        self.encoder[" "] = self.encoder[space_token]
        self.encoder["\n"] = self.encoder[line_token]

        del self.encoder[space_token]
        del self.encoder[line_token]

        self.encoder = collections.OrderedDict(sorted(self.encoder.items(), key=lambda x: x[1]))
        self.decoder = {v: k for k, v in self.encoder.items()}

        self.wordpiece_tokenizer = WordpieceTokenizer(vocab=self.encoder, unk_token=self.unk_token)
    @property
    def bod_token_id(self):
        return self.encoder[self.bod_token]

    @property
    def eod_token_id(self):
        return self.encoder[self.eod_token]

    @property
    def newline_id(self):
        return self.encoder["\n"]

    @property
    def vocab_size(self) -> int:
        return len(self.encoder)

    def get_vocab(self):
        return dict(self.encoder, **self.added_tokens_encoder)
    def _tokenize(self, text):
        output_tokens = []
        for x in jieba.cut(text, cut_all=False):
            output_tokens.extend(self.wordpiece_tokenizer.tokenize(x))
        return output_tokens
    def _decode(self, token_ids, **kwargs):
        token_ids = [i for i in token_ids if i >= 0]
        token_ids = [
            x for x in token_ids if x != self.pad_token_id and x != self.eos_token_id and x != self.bos_token_id
        ]
        return super()._decode(token_ids, **kwargs)
    def check(self, token):
        return token in self.encoder

    def convert_tokens_to_string(self, tokens: List[str]) -> str:
        return "".join(tokens)

    def _convert_token_to_id(self, token):
        return self.encoder.get(token, self.encoder.get(self.unk_token))

    def _convert_id_to_token(self, index):
        return self.decoder.get(index, self.unk_token)
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if os.path.isdir(save_directory):
            vocab_file = os.path.join(
                save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
            )
        else:
            vocab_file = (filename_prefix + "-" if filename_prefix else "") + save_directory
        index = 0
        if " " in self.encoder:
            self.encoder["</_>"] = self.encoder[" "]
            del self.encoder[" "]
        if "\n" in self.encoder:
            self.encoder["</n>"] = self.encoder["\n"]
            del self.encoder["\n"]
        self.encoder = collections.OrderedDict(sorted(self.encoder.items(), key=lambda x: x[1]))
        with open(vocab_file, "w", encoding="utf-8") as writer:
            for token, token_index in self.encoder.items():
                if index != token_index:
                    logger.warning(
                        f"Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive."
                        " Please check that the vocabulary is not corrupted!"
                    )
                    index = token_index
                writer.write(token + "\n")
                index += 1
        return (vocab_file,)
    def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: List[int] = None) -> List[int]:
        if token_ids_1 is None:
            return [self.bos_token_id] + token_ids_0
        return [self.bos_token_id] + token_ids_0 + [self.bos_token_id] + token_ids_1

    def get_special_tokens_mask(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )
        if token_ids_1 is not None:
            return [1] + ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1))
        return [1] + ([0] * len(token_ids_0))
| 348 | 0 |
"""simple docstring"""
def speed_of_sound_in_a_fluid(density: float, bulk_modulus: float) -> float:
    if density <= 0:
        raise ValueError("Impossible fluid density")
    if bulk_modulus <= 0:
        raise ValueError("Impossible bulk modulus")

    return (bulk_modulus / density) ** 0.5
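

# Editor's sketch (the values are textbook approximations, not from the original
# file): water at ~20 C has density ~998 kg/m^3 and bulk modulus ~2.15e9 Pa,
# giving a speed of sound of roughly 1.47 km/s.
def _demo_speed_of_sound() -> None:
    speed = speed_of_sound_in_a_fluid(density=998, bulk_modulus=2.15e9)
    assert round(speed) == 1468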
if __name__ == "__main__":
import doctest
doctest.testmod()
| 710 |
"""simple docstring"""
import copy
import json
import os
import tempfile
from transformers import is_torch_available
from .test_configuration_utils import config_common_kwargs
class ConfigTester(object):
    def __init__(self, parent, config_class=None, has_text_modality=True, common_properties=None, **kwargs):
        self.parent = parent
        self.config_class = config_class
        self.has_text_modality = has_text_modality
        self.inputs_dict = kwargs
        self.common_properties = common_properties
    def create_and_test_config_common_properties(self):
        config = self.config_class(**self.inputs_dict)
        common_properties = (
            ["hidden_size", "num_attention_heads", "num_hidden_layers"]
            if self.common_properties is None
            else self.common_properties
        )

        # Add common fields for text models
        if self.has_text_modality:
            common_properties.extend(["vocab_size"])

        # Test that config has the common properties as getters
        for prop in common_properties:
            self.parent.assertTrue(hasattr(config, prop), msg=f"`{prop}` does not exist")

        # Test that config has the common properties as setter
        for idx, name in enumerate(common_properties):
            try:
                setattr(config, name, idx)
                self.parent.assertEqual(
                    getattr(config, name), idx, msg=f"`{name} value {idx} expected, but was {getattr(config, name)}"
                )
            except NotImplementedError:
                # Some models might not be able to implement setters for common_properties
                # In that case, a NotImplementedError is raised
                pass

        # Test if config class can be called with Config(prop_name=..)
        for idx, name in enumerate(common_properties):
            try:
                config = self.config_class(**{name: idx})
                self.parent.assertEqual(
                    getattr(config, name), idx, msg=f"`{name} value {idx} expected, but was {getattr(config, name)}"
                )
            except NotImplementedError:
                # Some models might not be able to implement setters for common_properties
                # In that case, a NotImplementedError is raised
                pass
    def create_and_test_config_to_json_string(self):
        config = self.config_class(**self.inputs_dict)
        obj = json.loads(config.to_json_string())
        for key, value in self.inputs_dict.items():
            self.parent.assertEqual(obj[key], value)
    def create_and_test_config_to_json_file(self):
        config_first = self.config_class(**self.inputs_dict)

        with tempfile.TemporaryDirectory() as tmpdirname:
            json_file_path = os.path.join(tmpdirname, "config.json")
            config_first.to_json_file(json_file_path)
            config_second = self.config_class.from_json_file(json_file_path)

        self.parent.assertEqual(config_second.to_dict(), config_first.to_dict())
    def create_and_test_config_from_and_save_pretrained(self):
        config_first = self.config_class(**self.inputs_dict)

        with tempfile.TemporaryDirectory() as tmpdirname:
            config_first.save_pretrained(tmpdirname)
            config_second = self.config_class.from_pretrained(tmpdirname)

        self.parent.assertEqual(config_second.to_dict(), config_first.to_dict())
    def create_and_test_config_from_and_save_pretrained_subfolder(self):
        config_first = self.config_class(**self.inputs_dict)
        subfolder = "test"

        with tempfile.TemporaryDirectory() as tmpdirname:
            sub_directory = os.path.join(tmpdirname, subfolder)
            config_first.save_pretrained(sub_directory)
            config_second = self.config_class.from_pretrained(tmpdirname, subfolder=subfolder)

        self.parent.assertEqual(config_second.to_dict(), config_first.to_dict())
    def create_and_test_config_with_num_labels(self):
        config = self.config_class(**self.inputs_dict, num_labels=5)
        self.parent.assertEqual(len(config.id2label), 5)
        self.parent.assertEqual(len(config.label2id), 5)

        config.num_labels = 3
        self.parent.assertEqual(len(config.id2label), 3)
        self.parent.assertEqual(len(config.label2id), 3)
    def check_config_can_be_init_without_params(self):
        if self.config_class.is_composition:
            return
        config = self.config_class()
        self.parent.assertIsNotNone(config)
    def check_config_arguments_init(self):
        kwargs = copy.deepcopy(config_common_kwargs)
        config = self.config_class(**kwargs)
        wrong_values = []
        for key, value in config_common_kwargs.items():
            if key == "torch_dtype":
                if not is_torch_available():
                    continue
                else:
                    import torch

                    if config.torch_dtype != torch.float16:
                        wrong_values.append(("torch_dtype", config.torch_dtype, torch.float16))
            elif getattr(config, key) != value:
                wrong_values.append((key, getattr(config, key), value))

        if len(wrong_values) > 0:
            errors = "\n".join([f"- {v[0]}: got {v[1]} instead of {v[2]}" for v in wrong_values])
            raise ValueError(f"The following keys were not properly set in the config:\n{errors}")
    def run_common_tests(self):
self.create_and_test_config_common_properties()
self.create_and_test_config_to_json_string()
self.create_and_test_config_to_json_file()
self.create_and_test_config_from_and_save_pretrained()
self.create_and_test_config_from_and_save_pretrained_subfolder()
self.create_and_test_config_with_num_labels()
self.check_config_can_be_init_without_params()
self.check_config_arguments_init()
| 463 | 0 |
def sum_of_series(first_term: int, common_diff: int, num_of_terms: int) -> float:
    total = (num_of_terms / 2) * (2 * first_term + (num_of_terms - 1) * common_diff)
    # formula for sum of an arithmetic series
    return total


def main() -> None:
    print(sum_of_series(1, 1, 10))
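

# Editor's sketch (not in the original file): the closed form above matches the
# direct sum; 1 + 2 + ... + 10 (first term 1, common difference 1) equals 55.
def _demo_sum_of_series() -> None:
    assert sum_of_series(1, 1, 10) == sum(range(1, 11)) == 55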
if __name__ == "__main__":
import doctest
doctest.testmod()
| 401 | import string
from math import log10


def term_frequency(term: str, document: str) -> int:
    """Return the number of times `term` appears in `document`."""
    document_without_punctuation = document.translate(
        str.maketrans("", "", string.punctuation)
    ).replace("\n", "")
    tokenize_document = document_without_punctuation.split(" ")  # word tokenization
    return len([word for word in tokenize_document if word.lower() == term.lower()])


def document_frequency(term: str, corpus: str) -> tuple[int, int]:
    """Return (number of documents containing `term`, total number of documents)."""
    corpus_without_punctuation = corpus.lower().translate(
        str.maketrans("", "", string.punctuation)
    )  # strip all punctuation and replace it with ''
    docs = corpus_without_punctuation.split("\n")
    term = term.lower()
    return (len([doc for doc in docs if term in doc]), len(docs))


def inverse_document_frequency(df: int, n: int, smoothing: bool = False) -> float:
    if smoothing:
        if n == 0:
            raise ValueError("log10(0) is undefined.")
        return round(1 + log10(n / (1 + df)), 3)

    if df == 0:
        raise ZeroDivisionError("df must be > 0")
    elif n == 0:
        raise ValueError("log10(0) is undefined.")
    return round(log10(n / df), 3)


def tf_idf(tf: int, idf: int) -> float:
    return round(tf * idf, 3)
| 401 | 1 |
import os
import posixpath
import shutil
import uuid
from dataclasses import dataclass
from typing import TYPE_CHECKING, Iterable, List, Optional, Tuple, Union
import numpy as np
import pyarrow as pa
import datasets
from datasets.arrow_writer import ArrowWriter, ParquetWriter
from datasets.config import MAX_SHARD_SIZE
from datasets.filesystems import (
is_remote_filesystem,
rename,
)
from datasets.iterable_dataset import _BaseExamplesIterable
from datasets.utils.py_utils import convert_file_size_to_int
__a = datasets.utils.logging.get_logger(__name__)
if TYPE_CHECKING:
import pyspark
@dataclass
class SparkConfig(datasets.BuilderConfig):
    """BuilderConfig for Spark."""

    features: Optional[datasets.Features] = None
def _generate_iterable_examples(df: "pyspark.sql.DataFrame", partition_order: List[int]):
    import pyspark

    def generate_fn():
        df_with_partition_id = df.select("*", pyspark.sql.functions.spark_partition_id().alias("part_id"))
        for partition_id in partition_order:
            partition_df = df_with_partition_id.select("*").where(f"part_id = {partition_id}").drop("part_id")
            rows = partition_df.collect()
            row_id = 0
            for row in rows:
                yield f"{partition_id}_{row_id}", row.asDict()
                row_id += 1

    return generate_fn
class SparkExamplesIterable(_BaseExamplesIterable):
    def __init__(self, df: "pyspark.sql.DataFrame", partition_order=None):
        self.df = df
        self.partition_order = partition_order or range(self.df.rdd.getNumPartitions())
        self.generate_examples_fn = _generate_iterable_examples(self.df, self.partition_order)
    def __iter__(self):
        yield from self.generate_examples_fn()

    def shuffle_data_sources(self, generator: np.random.Generator) -> "SparkExamplesIterable":
        partition_order = list(range(self.df.rdd.getNumPartitions()))
        generator.shuffle(partition_order)
        return SparkExamplesIterable(self.df, partition_order=partition_order)

    def shard_data_sources(self, worker_id: int, num_workers: int) -> "SparkExamplesIterable":
        partition_order = self.split_shard_indices_by_worker(worker_id, num_workers)
        return SparkExamplesIterable(self.df, partition_order=partition_order)

    @property
    def n_shards(self) -> int:
        return len(self.partition_order)
class Spark(datasets.DatasetBuilder):
    BUILDER_CONFIG_CLASS = SparkConfig

    def __init__(self, df: "pyspark.sql.DataFrame", cache_dir: str = None, working_dir: str = None, **config_kwargs):
        import pyspark

        self._spark = pyspark.sql.SparkSession.builder.getOrCreate()
        self.df = df
        self._working_dir = working_dir
        super().__init__(
            cache_dir=cache_dir, config_name=str(self.df.semanticHash()), **config_kwargs,
        )
    def _validate_cache_dir(self):
        def create_cache_and_write_probe(context):
            # makedirs with exist_ok will recursively create the directory. It will not throw an error if directories
            # already exist.
            os.makedirs(self._cache_dir, exist_ok=True)
            probe_file = os.path.join(self._cache_dir, "fs_test" + uuid.uuid4().hex)
            # Opening the file in append mode will create a new file unless it already exists, in which case it will not
            # change the file contents.
            open(probe_file, "a")
            return [probe_file]

        if self._spark.conf.get("spark.master", "").startswith("local"):
            return

        # If the cluster is multi-node, make sure that the user provided a cache_dir and that it is on an NFS
        # accessible to the driver.
        # TODO: Stream batches to the driver using ArrowCollectSerializer instead of throwing an error.
        if self._cache_dir:
            probe = (
                self._spark.sparkContext.parallelize(range(1), 1).mapPartitions(create_cache_and_write_probe).collect()
            )
            if os.path.isfile(probe[0]):
                return

        raise ValueError(
            "When using Dataset.from_spark on a multi-node cluster, the driver and all workers should be able to access cache_dir"
        )
    def _info(self):
        return datasets.DatasetInfo(features=self.config.features)

    def _split_generators(self, dl_manager: datasets.download.download_manager.DownloadManager):
        return [datasets.SplitGenerator(name=datasets.Split.TRAIN)]
    def _repartition_df_if_needed(self, max_shard_size):
        import pyspark

        def get_arrow_batch_size(it):
            for batch in it:
                yield pa.RecordBatch.from_pydict({"batch_bytes": [batch.nbytes]})

        df_num_rows = self.df.count()
        sample_num_rows = df_num_rows if df_num_rows <= 100 else 100
        # Approximate the size of each row (in Arrow format) by averaging over a max-100-row sample.
        approx_bytes_per_row = (
            self.df.limit(sample_num_rows)
            .repartition(1)
            .mapInArrow(get_arrow_batch_size, "batch_bytes: long")
            .agg(pyspark.sql.functions.sum("batch_bytes").alias("sample_bytes"))
            .collect()[0]
            .sample_bytes
            / sample_num_rows
        )
        approx_total_size = approx_bytes_per_row * df_num_rows
        if approx_total_size > max_shard_size:
            # Make sure there is at least one row per partition.
            new_num_partitions = min(df_num_rows, int(approx_total_size / max_shard_size))
            self.df = self.df.repartition(new_num_partitions)
    def _prepare_split_single(self, fpath: str, file_format: str, max_shard_size: int) -> Iterable[Tuple[int, Tuple[int, int, int, List[int]]]]:
        import pyspark

        writer_class = ParquetWriter if file_format == "parquet" else ArrowWriter
        working_fpath = os.path.join(self._working_dir, os.path.basename(fpath)) if self._working_dir else fpath
        embed_local_files = file_format == "parquet"

        # Define these so that we don't reference self in write_arrow, which will result in a pickling error due to
        # pickling the SparkContext.
        features = self.config.features
        writer_batch_size = self._writer_batch_size
        storage_options = self._fs.storage_options

        def write_arrow(it):
            # Within the same SparkContext, no two task attempts will share the same attempt ID.
            task_id = pyspark.TaskContext().taskAttemptId()
            first_batch = next(it, None)
            if first_batch is None:
                # Some partitions might not receive any data.
                return pa.RecordBatch.from_arrays(
                    [[task_id], [0], [0]], names=["task_id", "num_examples", "num_bytes"],
                )
            shard_id = 0
            writer = writer_class(
                features=features, path=working_fpath.replace("SSSSS", f"{shard_id:05d}").replace("TTTTT", f"{task_id:05d}"), writer_batch_size=writer_batch_size, storage_options=storage_options, embed_local_files=embed_local_files,
            )
            table = pa.Table.from_batches([first_batch])
            writer.write_table(table)
            for batch in it:
                if max_shard_size is not None and writer._num_bytes >= max_shard_size:
                    num_examples, num_bytes = writer.finalize()
                    writer.close()
                    yield pa.RecordBatch.from_arrays(
                        [[task_id], [num_examples], [num_bytes]], names=["task_id", "num_examples", "num_bytes"],
                    )
                    shard_id += 1
                    writer = writer_class(
                        features=writer._features, path=working_fpath.replace("SSSSS", f"{shard_id:05d}").replace("TTTTT", f"{task_id:05d}"), writer_batch_size=writer_batch_size, storage_options=storage_options, embed_local_files=embed_local_files,
                    )
                table = pa.Table.from_batches([batch])
                writer.write_table(table)
            if writer._num_bytes > 0:
                num_examples, num_bytes = writer.finalize()
                writer.close()
                yield pa.RecordBatch.from_arrays(
                    [[task_id], [num_examples], [num_bytes]], names=["task_id", "num_examples", "num_bytes"],
                )
            if working_fpath != fpath:
                for file in os.listdir(os.path.dirname(working_fpath)):
                    dest = os.path.join(os.path.dirname(fpath), os.path.basename(file))
                    shutil.move(file, dest)

        stats = (
            self.df.mapInArrow(write_arrow, "task_id: long, num_examples: long, num_bytes: long")
            .groupBy("task_id")
            .agg(
                pyspark.sql.functions.sum("num_examples").alias("total_num_examples"), pyspark.sql.functions.sum("num_bytes").alias("total_num_bytes"), pyspark.sql.functions.count("num_bytes").alias("num_shards"), pyspark.sql.functions.collect_list("num_examples").alias("shard_lengths"),
            )
            .collect()
        )
        for row in stats:
            yield row.task_id, (row.total_num_examples, row.total_num_bytes, row.num_shards, row.shard_lengths)
def _a ( self : List[str] , snake_case__ : "datasets.SplitGenerator" , snake_case__ : str = "arrow" , snake_case__ : Optional[Union[str, int]] = None , snake_case__ : Optional[int] = None , **snake_case__ : Optional[Any] , ):
"""simple docstring"""
self._validate_cache_dir()
A =convert_file_size_to_int(max_shard_size or MAX_SHARD_SIZE )
self._repartition_df_if_needed(snake_case__ )
A =not is_remote_filesystem(self._fs )
A =os.path.join if is_local else posixpath.join
A ="-TTTTT-SSSSS-of-NNNNN"
A =f'''{self.name}-{split_generator.name}{SUFFIX}.{file_format}'''
A =path_join(self._output_dir , snake_case__ )
A =0
A =0
A =0
A =[]
A =[]
for task_id, content in self._prepare_split_single(snake_case__ , snake_case__ , snake_case__ ):
            (num_examples, num_bytes, num_shards, shard_lengths) = content
if num_bytes > 0:
total_num_examples += num_examples
total_num_bytes += num_bytes
total_shards += num_shards
task_id_and_num_shards.append((task_id, num_shards) )
                all_shard_lengths.extend(shard_lengths)

        split_generator.split_info.num_examples = total_num_examples
        split_generator.split_info.num_bytes = total_num_bytes

        # should rename everything at the end
        logger.debug(f"Renaming {total_shards} shards.")
        if total_shards > 1:
            split_generator.split_info.shard_lengths = all_shard_lengths

            # Define fs outside of _rename_shard so that we don't reference self in the function, which will result in a
            # pickling error due to pickling the SparkContext.
            fs = self._fs

            # use the -SSSSS-of-NNNNN pattern
            def _rename_shard(task_id: int, shard_id: int, global_shard_id: int):
                rename(
                    fs,
                    fpath.replace("SSSSS", f"{shard_id:05d}").replace("TTTTT", f"{task_id:05d}"),
                    fpath.replace("TTTTT-SSSSS", f"{global_shard_id:05d}").replace("NNNNN", f"{total_shards:05d}"),
                )

            args = []
            global_shard_id = 0
            for i in range(len(task_id_and_num_shards)):
                task_id, num_shards = task_id_and_num_shards[i]
                for shard_id in range(num_shards):
                    args.append([task_id, shard_id, global_shard_id])
                    global_shard_id += 1
            self._spark.sparkContext.parallelize(args, len(args)).map(lambda args: _rename_shard(*args)).collect()
        else:
            # don't use any pattern
            shard_id = 0
            task_id = task_id_and_num_shards[0][0]
            self._rename(
                fpath.replace("SSSSS", f"{shard_id:05d}").replace("TTTTT", f"{task_id:05d}"),
                fpath.replace(SUFFIX, ""),
            )
    def _get_examples_iterable_for_split(self, split_generator: "datasets.SplitGenerator") -> SparkExamplesIterable:
        return SparkExamplesIterable(self.df)
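

# Editor's note (illustrative, assumes a live SparkSession): this builder is what
# `datasets.Dataset.from_spark(df)` uses under the hood; each Spark partition is
# read in partition order and written out as Arrow (or Parquet) shards.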
| 689 |
def find_minimum_change(denominations: list[int], value: str) -> list[int]:
    total_value = int(value)

    # Initialize Result
    answer = []

    # Traverse through all denominations
    for denomination in reversed(denominations):
        # Find denominations
        while int(total_value) >= int(denomination):
            total_value -= int(denomination)
            answer.append(denomination)  # Append to the "answer" array

    return answer
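

# Editor's sketch (not in the original file): greedy change-making with the
# default Indian denominations used below; 987 = 500 + 4*100 + 50 + 20 + 10 + 5 + 2.
def _demo_find_minimum_change() -> None:
    coins = find_minimum_change([1, 2, 5, 10, 20, 50, 100, 500, 2000], "987")
    assert coins == [500, 100, 100, 100, 100, 50, 20, 10, 5, 2]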
# Driver Code
if __name__ == "__main__":
__a = []
__a = """0"""
if (
input("""Do you want to enter your denominations ? (yY/n): """).strip().lower()
== "y"
):
__a = int(input("""Enter the number of denominations you want to add: """).strip())
for i in range(0, n):
denominations.append(int(input(F'''Denomination {i}: ''').strip()))
__a = input("""Enter the change you want to make in Indian Currency: """).strip()
else:
# All denominations of Indian Currency if user does not enter
__a = [1, 2, 5, 1_0, 2_0, 5_0, 1_0_0, 5_0_0, 2_0_0_0]
__a = input("""Enter the change you want to make: """).strip()
if int(value) == 0 or int(value) < 0:
print("""The total value cannot be zero or negative.""")
else:
print(F'''Following is minimal change for {value}: ''')
__a = find_minimum_change(denominations, value)
# Print result
for i in range(len(answer)):
print(answer[i], end=""" """)
| 689 | 1 |
from math import ceil
def solution(n: int = 1001) -> int:
    """Returns the sum of the numbers on the diagonals of an n by n spiral."""
    total = 1

    for i in range(1, int(ceil(n / 2.0))):
        odd = 2 * i + 1
        even = 2 * i
        total = total + 4 * odd**2 - 6 * even

    return total
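

# Editor's note (worked example): for a 5x5 spiral the diagonal values are
# 1, 3, 5, 7, 9, 13, 17, 21, 25, so solution(5) == 101; each ring contributes
# 4*odd**2 - 6*even, exactly as accumulated in the loop above.
def _demo_solution() -> None:
    assert solution(5) == 101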
if __name__ == "__main__":
import sys
if len(sys.argv) == 1:
print(solution())
else:
try:
            n = int(sys.argv[1])
print(solution(n))
except ValueError:
print("Invalid entry - please enter a number")
| 637 |
from typing import Optional, Union
import torch
from torch import nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
from ...activations import ACTaFN
from ...modeling_outputs import BaseModelOutputWithPoolingAndNoAttention, ImageClassifierOutputWithNoAttention
from ...modeling_utils import PreTrainedModel
from ...utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, logging
from .configuration_mobilenet_va import MobileNetVaConfig
__a : Union[str, Any] = logging.get_logger(__name__)
# General docstring
__a : List[str] = "MobileNetV1Config"
# Base docstring
__a : int = "google/mobilenet_v1_1.0_224"
__a : List[Any] = [1, 1024, 7, 7]
# Image classification docstring
__a : Optional[int] = "google/mobilenet_v1_1.0_224"
__a : List[Any] = "tabby, tabby cat"
__a : Union[str, Any] = [
"google/mobilenet_v1_1.0_224",
"google/mobilenet_v1_0.75_192",
# See all MobileNetV1 models at https://huggingface.co/models?filter=mobilenet_v1
]
def _build_tf_to_pytorch_map(model, config, tf_weights=None):
    """A map of modules from TF to PyTorch."""
    tf_to_pt_map = {}

    # MobileNetVaForImageClassification is defined later in this module (truncated here).
    if isinstance(model, MobileNetVaForImageClassification):
        backbone = model.mobilenet_va
    else:
        backbone = model

    prefix = "MobilenetV1/Conv2d_0/"
    tf_to_pt_map[prefix + "weights"] = backbone.conv_stem.convolution.weight
    tf_to_pt_map[prefix + "BatchNorm/beta"] = backbone.conv_stem.normalization.bias
    tf_to_pt_map[prefix + "BatchNorm/gamma"] = backbone.conv_stem.normalization.weight
    tf_to_pt_map[prefix + "BatchNorm/moving_mean"] = backbone.conv_stem.normalization.running_mean
    tf_to_pt_map[prefix + "BatchNorm/moving_variance"] = backbone.conv_stem.normalization.running_var

    for i in range(13):
        tf_index = i + 1
        pt_index = i * 2

        pointer = backbone.layer[pt_index]
        prefix = f"MobilenetV1/Conv2d_{tf_index}_depthwise/"
        tf_to_pt_map[prefix + "depthwise_weights"] = pointer.convolution.weight
        tf_to_pt_map[prefix + "BatchNorm/beta"] = pointer.normalization.bias
        tf_to_pt_map[prefix + "BatchNorm/gamma"] = pointer.normalization.weight
        tf_to_pt_map[prefix + "BatchNorm/moving_mean"] = pointer.normalization.running_mean
        tf_to_pt_map[prefix + "BatchNorm/moving_variance"] = pointer.normalization.running_var

        pointer = backbone.layer[pt_index + 1]
        prefix = f"MobilenetV1/Conv2d_{tf_index}_pointwise/"
        tf_to_pt_map[prefix + "weights"] = pointer.convolution.weight
        tf_to_pt_map[prefix + "BatchNorm/beta"] = pointer.normalization.bias
        tf_to_pt_map[prefix + "BatchNorm/gamma"] = pointer.normalization.weight
        tf_to_pt_map[prefix + "BatchNorm/moving_mean"] = pointer.normalization.running_mean
        tf_to_pt_map[prefix + "BatchNorm/moving_variance"] = pointer.normalization.running_var

    if isinstance(model, MobileNetVaForImageClassification):
        prefix = "MobilenetV1/Logits/Conv2d_1c_1x1/"
        tf_to_pt_map[prefix + "weights"] = model.classifier.weight
        tf_to_pt_map[prefix + "biases"] = model.classifier.bias

    return tf_to_pt_map
def load_tf_weights_in_mobilenet_va(model, config, tf_checkpoint_path):
    """Load TensorFlow checkpoints into a PyTorch model."""
    try:
        import numpy as np
        import tensorflow as tf
    except ImportError:
        logger.error(
            "Loading a TensorFlow model in PyTorch requires TensorFlow to be installed. Please see "
            "https://www.tensorflow.org/install/ for installation instructions."
        )
        raise

    # Load weights from TF model
    init_vars = tf.train.list_variables(tf_checkpoint_path)
    tf_weights = {}
    for name, shape in init_vars:
        logger.info(f"Loading TF weight {name} with shape {shape}")
        array = tf.train.load_variable(tf_checkpoint_path, name)
        tf_weights[name] = array

    # Build TF to PyTorch weights loading map
    tf_to_pt_map = _build_tf_to_pytorch_map(model, config, tf_weights)

    for name, pointer in tf_to_pt_map.items():
        logger.info(f"Importing {name}")
        if name not in tf_weights:
            logger.info(f"{name} not in tf pre-trained weights, skipping")
            continue

        array = tf_weights[name]

        if "depthwise_weights" in name:
            logger.info("Transposing depthwise")
            array = np.transpose(array, (2, 3, 0, 1))
        elif "weights" in name:
            logger.info("Transposing")
            if len(pointer.shape) == 2:  # copying into linear layer
                array = array.squeeze().transpose()
            else:
                array = np.transpose(array, (3, 2, 0, 1))

        if pointer.shape != array.shape:
            raise ValueError(f"Pointer shape {pointer.shape} and array shape {array.shape} mismatched")

        logger.info(f"Initialize PyTorch weight {name} {array.shape}")
        pointer.data = torch.from_numpy(array)

        tf_weights.pop(name, None)
        tf_weights.pop(name + "/RMSProp", None)
        tf_weights.pop(name + "/RMSProp_1", None)
        tf_weights.pop(name + "/ExponentialMovingAverage", None)

    logger.info(f"Weights not copied to PyTorch model: {', '.join(tf_weights.keys())}")
    return model
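# TensorFlow "SAME" padding pads asymmetrically (the extra pixel, if any, goes to the
# bottom/right), whereas PyTorch's Conv2d pads symmetrically; the helper below
# reproduces the TF behavior by padding explicitly before the convolution.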
def apply_tf_padding(features: torch.Tensor, conv_layer: nn.Conv2d) -> torch.Tensor:
    """Pad the input like TensorFlow's 'SAME' padding mode would."""
    in_height, in_width = features.shape[-2:]
    stride_height, stride_width = conv_layer.stride
    kernel_height, kernel_width = conv_layer.kernel_size

    if in_height % stride_height == 0:
        pad_along_height = max(kernel_height - stride_height, 0)
    else:
        pad_along_height = max(kernel_height - (in_height % stride_height), 0)

    if in_width % stride_width == 0:
        pad_along_width = max(kernel_width - stride_width, 0)
    else:
        pad_along_width = max(kernel_width - (in_width % stride_width), 0)

    pad_left = pad_along_width // 2
    pad_right = pad_along_width - pad_left
    pad_top = pad_along_height // 2
    pad_bottom = pad_along_height - pad_top

    padding = (pad_left, pad_right, pad_top, pad_bottom)
    return nn.functional.pad(features, padding, "constant", 0.0)
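# Basic building block: convolution -> optional batch norm -> optional activation,
# with TF-style padding applied on the fly when the config requests it.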
class MobileNetVaConvLayer(nn.Module):
    def __init__(
        self,
        config: MobileNetVaConfig,
        in_channels: int,
        out_channels: int,
        kernel_size: int,
        stride: Optional[int] = 1,
        groups: Optional[int] = 1,
        bias: bool = False,
        use_normalization: Optional[bool] = True,
        use_activation: Optional[bool or str] = True,
    ) -> None:
        super().__init__()
        self.config = config

        if in_channels % groups != 0:
            raise ValueError(f"Input channels ({in_channels}) are not divisible by {groups} groups.")
        if out_channels % groups != 0:
            raise ValueError(f"Output channels ({out_channels}) are not divisible by {groups} groups.")

        padding = 0 if config.tf_padding else int((kernel_size - 1) / 2)

        self.convolution = nn.Conv2d(
            in_channels=in_channels,
            out_channels=out_channels,
            kernel_size=kernel_size,
            stride=stride,
            padding=padding,
            groups=groups,
            bias=bias,
            padding_mode="zeros",
        )

        if use_normalization:
            self.normalization = nn.BatchNorm2d(
                num_features=out_channels,
                eps=config.layer_norm_eps,
                momentum=0.9997,
                affine=True,
                track_running_stats=True,
            )
        else:
            self.normalization = None

        if use_activation:
            if isinstance(use_activation, str):
                self.activation = ACTaFN[use_activation]
            elif isinstance(config.hidden_act, str):
                self.activation = ACTaFN[config.hidden_act]
            else:
                self.activation = config.hidden_act
        else:
            self.activation = None

    def forward(self, features: torch.Tensor) -> torch.Tensor:
        if self.config.tf_padding:
            features = apply_tf_padding(features, self.convolution)
        features = self.convolution(features)
        if self.normalization is not None:
            features = self.normalization(features)
        if self.activation is not None:
            features = self.activation(features)
        return features
class MobileNetVaPreTrainedModel(PreTrainedModel):
    config_class = MobileNetVaConfig
    load_tf_weights = load_tf_weights_in_mobilenet_va
    base_model_prefix = "mobilenet_v1"
    main_input_name = "pixel_values"
    supports_gradient_checkpointing = False

    def _init_weights(self, module: Union[nn.Linear, nn.Conv2d]) -> None:
        """Initialize the weights."""
        if isinstance(module, (nn.Linear, nn.Conv2d)):
            module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
            if module.bias is not None:
                module.bias.data.zero_()
        elif isinstance(module, nn.BatchNorm2d):
            module.bias.data.zero_()
            module.weight.data.fill_(1.0)
MOBILENET_V1_START_DOCSTRING = R"\n    This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it\n    as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and\n    behavior.\n\n    Parameters:\n        config ([`MobileNetV1Config`]): Model configuration class with all the parameters of the model.\n            Initializing with a config file does not load the weights associated with the model, only the\n            configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.\n"
MOBILENET_V1_INPUTS_DOCSTRING = R"\n    Args:\n        pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):\n            Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See\n            [`MobileNetV1ImageProcessor.__call__`] for details.\n        output_hidden_states (`bool`, *optional*):\n            Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for\n            more detail.\n        return_dict (`bool`, *optional*):\n            Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.\n"
@add_start_docstrings(
    "The bare MobileNetV1 model outputting raw hidden-states without any specific head on top.",
    MOBILENET_V1_START_DOCSTRING,
)
class MobileNetVaModel(MobileNetVaPreTrainedModel):
'''simple docstring'''
    def __init__(self, config: MobileNetVaConfig, add_pooling_layer: bool = True):
        super().__init__(config)
        self.config = config

        depth = 32
        out_channels = max(int(depth * config.depth_multiplier), config.min_depth)

        self.conv_stem = MobileNetVaConvLayer(
            config, in_channels=config.num_channels, out_channels=out_channels, kernel_size=3, stride=2,
        )

        strides = [1, 2, 1, 2, 1, 2, 1, 1, 1, 1, 1, 2, 1]

        self.layer = nn.ModuleList()
        for i in range(13):
            in_channels = out_channels
            if strides[i] == 2 or i == 0:
                depth *= 2
                out_channels = max(int(depth * config.depth_multiplier), config.min_depth)

            # depthwise 3x3 convolution (groups == channels), then pointwise 1x1 projection
            self.layer.append(
                MobileNetVaConvLayer(
                    config, in_channels=in_channels, out_channels=in_channels,
                    kernel_size=3, stride=strides[i], groups=in_channels,
                )
            )
            self.layer.append(
                MobileNetVaConvLayer(config, in_channels=in_channels, out_channels=out_channels, kernel_size=1)
            )

        self.pooler = nn.AdaptiveAvgPool2d((1, 1)) if add_pooling_layer else None

        # Initialize weights and apply final processing
        self.post_init()
    def _prune_heads(self, heads_to_prune):
        raise NotImplementedError
    @add_start_docstrings_to_model_forward(MOBILENET_V1_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
        checkpoint=_CHECKPOINT_FOR_DOC,
        output_type=BaseModelOutputWithPoolingAndNoAttention,
        config_class=_CONFIG_FOR_DOC,
        modality="vision",
        expected_output=_EXPECTED_OUTPUT_SHAPE,
    )
    def forward(
        self,
        pixel_values: Optional[torch.Tensor] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ):
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        if pixel_values is None:
            raise ValueError("You have to specify pixel_values")

        hidden_states = self.conv_stem(pixel_values)

        all_hidden_states = () if output_hidden_states else None

        for i, layer_module in enumerate(self.layer):
            hidden_states = layer_module(hidden_states)
            if output_hidden_states:
                all_hidden_states = all_hidden_states + (hidden_states,)

        last_hidden_state = hidden_states

        if self.pooler is not None:
            pooled_output = torch.flatten(self.pooler(last_hidden_state), start_dim=1)
        else:
            pooled_output = None

        if not return_dict:
            return tuple(v for v in [last_hidden_state, pooled_output, all_hidden_states] if v is not None)

        return BaseModelOutputWithPoolingAndNoAttention(
            last_hidden_state=last_hidden_state,
            pooler_output=pooled_output,
            hidden_states=all_hidden_states,
        )
@add_start_docstrings(
    "\n    MobileNetV1 model with an image classification head on top (a linear layer on top of the pooled features), e.g. for\n    ImageNet.\n    ",
    MOBILENET_V1_START_DOCSTRING,
)
class MobileNetVaForImageClassification(MobileNetVaPreTrainedModel):
    def __init__(self, config: MobileNetVaConfig) -> None:
"""simple docstring"""
super().__init__(UpperCamelCase_ )
__A = config.num_labels
__A = MobileNetVaModel(UpperCamelCase_ )
__A = self.mobilenet_va.layer[-1].convolution.out_channels
# Classifier head
__A = nn.Dropout(config.classifier_dropout_prob , inplace=UpperCamelCase_ )
__A = nn.Linear(UpperCamelCase_ , config.num_labels ) if config.num_labels > 0 else nn.Identity()
# Initialize weights and apply final processing
self.post_init()
    @add_start_docstrings_to_model_forward(MOBILENET_V1_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
        checkpoint=_IMAGE_CLASS_CHECKPOINT,
        output_type=ImageClassifierOutputWithNoAttention,
        config_class=_CONFIG_FOR_DOC,
        expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT,
    )
    def forward(
        self,
        pixel_values: Optional[torch.Tensor] = None,
        output_hidden_states: Optional[bool] = None,
        labels: Optional[torch.Tensor] = None,
        return_dict: Optional[bool] = None,
    ):
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        outputs = self.mobilenet_va(pixel_values, output_hidden_states=output_hidden_states, return_dict=return_dict)

        pooled_output = outputs.pooler_output if return_dict else outputs[1]

        logits = self.classifier(self.dropout(pooled_output))

        loss = None
        if labels is not None:
            if self.config.problem_type is None:
                if self.num_labels == 1:
                    self.config.problem_type = "regression"
                elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
                    self.config.problem_type = "single_label_classification"
                else:
                    self.config.problem_type = "multi_label_classification"

            if self.config.problem_type == "regression":
                loss_fct = MSELoss()
                if self.num_labels == 1:
                    loss = loss_fct(logits.squeeze(), labels.squeeze())
                else:
                    loss = loss_fct(logits, labels)
            elif self.config.problem_type == "single_label_classification":
                loss_fct = CrossEntropyLoss()
                loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
            elif self.config.problem_type == "multi_label_classification":
                loss_fct = BCEWithLogitsLoss()
                loss = loss_fct(logits, labels)

        if not return_dict:
            output = (logits,) + outputs[2:]
            return ((loss,) + output) if loss is not None else output

        return ImageClassifierOutputWithNoAttention(
            loss=loss,
            logits=logits,
            hidden_states=outputs.hidden_states,
        )
| 637 | 1 |
from collections.abc import Callable
import numpy as np
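# Explicit (forward) Euler step: y_{k+1} = y_k + h * f(x_k, y_k).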
def explicit_euler(
    ode_func: Callable, ya: float, xa: float, step_size: float, x_end: float
) -> np.ndarray:
    """Solve an ODE with the explicit Euler method, starting from y(xa) = ya."""
    n = int(np.ceil((x_end - xa) / step_size))
    y = np.zeros((n + 1,))
    y[0] = ya
    x = xa

    for k in range(n):
        y[k + 1] = y[k] + step_size * ode_func(x, y[k])
        x += step_size

    return y
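# Example: y' = y on [0, 1] with y(0) = 1 (exact value e ≈ 2.71828):
# explicit_euler(lambda x, y: y, 1.0, 0.0, 0.01, 1.0)[-1]  # ≈ 2.7048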
if __name__ == "__main__":
import doctest
doctest.testmod()
| 592 |
from decimal import Decimal, getcontext
from math import ceil, factorial
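# Chudnovsky series: pi = 426880 * sqrt(10005) / sum_k M(k) * L(k) / X(k);
# each term contributes roughly 14 digits, hence ceil(precision / 14) iterations below.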
def pi(precision: int) -> str:
    """Compute pi to `precision` digits with the Chudnovsky algorithm."""
    if not isinstance(precision, int):
        raise TypeError("Undefined for non-integers")
    elif precision < 1:
        raise ValueError("Undefined for non-natural numbers")

    getcontext().prec = precision
    num_iterations = ceil(precision / 14)
    constant_term = 426_880 * Decimal(10_005).sqrt()
    exponential_term = 1
    linear_term = 13_591_409
    partial_sum = Decimal(linear_term)
    for k in range(1, num_iterations):
        multinomial_term = factorial(6 * k) // (factorial(3 * k) * factorial(k) ** 3)
        linear_term += 545_140_134
        exponential_term *= -262_537_412_640_768_000
        partial_sum += Decimal(multinomial_term * linear_term) / exponential_term
    return str(constant_term / partial_sum)[:-1]
if __name__ == "__main__":
    n = 50
    print(f"The first {n} digits of pi is: {pi(n)}")
| 592 | 1 |
import itertools
import json
import linecache
import os
import pickle
import re
import socket
import string
from collections import Counter
from logging import getLogger
from pathlib import Path
from typing import Callable, Dict, Iterable, List
import git
import torch
from torch.utils.data import Dataset
from transformers import BartTokenizer, RagTokenizer, TaTokenizer
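# Utilities for RAG fine-tuning: line-level tokenization, padding-column trimming,
# a seq2seq dataset over parallel .source/.target files, and EM/F1 evaluation helpers.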
def encode_line(tokenizer, line, max_length, padding_side, pad_to_max_length=True, return_tensors="pt"):
    """Tokenize a single line, padding/truncating it to `max_length`."""
    extra_kw = {"add_prefix_space": True} if isinstance(tokenizer, BartTokenizer) and not line.startswith(" ") else {}
    tokenizer.padding_side = padding_side
    return tokenizer(
        [line],
        max_length=max_length,
        padding="max_length" if pad_to_max_length else None,
        truncation=True,
        return_tensors=return_tensors,
        add_special_tokens=True,
        **extra_kw,
    )
def trim_batch(input_ids, pad_token_id, attention_mask=None):
    """Remove columns that are entirely padding."""
    keep_column_mask = input_ids.ne(pad_token_id).any(dim=0)
    if attention_mask is None:
        return input_ids[:, keep_column_mask]
    else:
        return (input_ids[:, keep_column_mask], attention_mask[:, keep_column_mask])
class SeqaSeqDataset(Dataset):
    def __init__(
        self, tokenizer, data_dir, max_source_length, max_target_length,
        type_path="train", n_obs=None, src_lang=None, tgt_lang=None, prefix="",
    ):
        super().__init__()
        self.src_file = Path(data_dir).joinpath(type_path + ".source")
        self.tgt_file = Path(data_dir).joinpath(type_path + ".target")
        self.src_lens = self.get_char_lens(self.src_file)
        self.max_source_length = max_source_length
        self.max_target_length = max_target_length
        assert min(self.src_lens) > 0, f"found empty line in {self.src_file}"
        self.tokenizer = tokenizer
        self.prefix = prefix
        if n_obs is not None:
            self.src_lens = self.src_lens[:n_obs]
        self.src_lang = src_lang
        self.tgt_lang = tgt_lang
    def __len__(self):
        return len(self.src_lens)

    def __getitem__(self, index):
        index = index + 1  # linecache starts at 1
        source_line = self.prefix + linecache.getline(str(self.src_file), index).rstrip("\n")
        tgt_line = linecache.getline(str(self.tgt_file), index).rstrip("\n")
        assert source_line, f"empty source line for index {index}"
        assert tgt_line, f"empty tgt line for index {index}"

        # Need to add eos token manually for T5
        if isinstance(self.tokenizer, TaTokenizer):
            source_line += self.tokenizer.eos_token
            tgt_line += self.tokenizer.eos_token

        # Pad source and target to the right
        source_tokenizer = (
            self.tokenizer.question_encoder if isinstance(self.tokenizer, RagTokenizer) else self.tokenizer
        )
        target_tokenizer = self.tokenizer.generator if isinstance(self.tokenizer, RagTokenizer) else self.tokenizer

        source_inputs = encode_line(source_tokenizer, source_line, self.max_source_length, "right")
        target_inputs = encode_line(target_tokenizer, tgt_line, self.max_target_length, "right")

        source_ids = source_inputs["input_ids"].squeeze()
        target_ids = target_inputs["input_ids"].squeeze()
        src_mask = source_inputs["attention_mask"].squeeze()
        return {
            "input_ids": source_ids,
            "attention_mask": src_mask,
            "decoder_input_ids": target_ids,
        }
    @staticmethod
    def get_char_lens(data_file):
        return [len(x) for x in Path(data_file).open().readlines()]

    def collate_fn(self, batch) -> Dict[str, torch.Tensor]:
        input_ids = torch.stack([x["input_ids"] for x in batch])
        masks = torch.stack([x["attention_mask"] for x in batch])
        target_ids = torch.stack([x["decoder_input_ids"] for x in batch])
        tgt_pad_token_id = (
            self.tokenizer.generator.pad_token_id
            if isinstance(self.tokenizer, RagTokenizer)
            else self.tokenizer.pad_token_id
        )
        src_pad_token_id = (
            self.tokenizer.question_encoder.pad_token_id
            if isinstance(self.tokenizer, RagTokenizer)
            else self.tokenizer.pad_token_id
        )
        y = trim_batch(target_ids, tgt_pad_token_id)
        source_ids, source_mask = trim_batch(input_ids, src_pad_token_id, attention_mask=masks)
        batch = {
            "input_ids": source_ids,
            "attention_mask": source_mask,
            "decoder_input_ids": y,
        }
        return batch
logger = getLogger(__name__)


def flatten_list(summary_ids: List[List]):
    return list(itertools.chain.from_iterable(summary_ids))


def save_git_info(folder_path: str) -> None:
    """Save git information to folder_path/git_log.json."""
    repo_infos = get_git_info()
    save_json(repo_infos, os.path.join(folder_path, "git_log.json"))


def save_json(content, path, indent=4, **json_dump_kwargs):
    with open(path, "w") as f:
        json.dump(content, f, indent=indent, **json_dump_kwargs)


def load_json(path):
    with open(path) as f:
        return json.load(f)


def get_git_info():
    repo = git.Repo(search_parent_directories=True)
    repo_infos = {
        "repo_id": str(repo),
        "repo_sha": str(repo.head.object.hexsha),
        "repo_branch": str(repo.active_branch),
        "hostname": str(socket.gethostname()),
    }
    return repo_infos


def lmap(f: Callable, x: Iterable) -> List:
    """list(map(f, x))"""
    return list(map(f, x))


def pickle_save(obj, path):
    """pickle.dump(obj, path)"""
    with open(path, "wb") as f:
        return pickle.dump(obj, f)
def normalize_answer(s):
    """Lower text and remove punctuation, articles and extra whitespace."""

    def remove_articles(text):
        return re.sub(r"\b(a|an|the)\b", " ", text)

    def white_space_fix(text):
        return " ".join(text.split())

    def remove_punc(text):
        exclude = set(string.punctuation)
        return "".join(ch for ch in text if ch not in exclude)

    def lower(text):
        return text.lower()

    return white_space_fix(remove_articles(remove_punc(lower(s))))
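# Token-level F1 in the SQuAD style: precision and recall are computed over the
# multiset intersection of normalized prediction and gold tokens.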
def fa_score(prediction, ground_truth):
    prediction_tokens = normalize_answer(prediction).split()
    ground_truth_tokens = normalize_answer(ground_truth).split()
    common = Counter(prediction_tokens) & Counter(ground_truth_tokens)
    num_same = sum(common.values())
    if num_same == 0:
        return 0
    precision = 1.0 * num_same / len(prediction_tokens)
    recall = 1.0 * num_same / len(ground_truth_tokens)
    fa = (2 * precision * recall) / (precision + recall)
    return fa


def exact_match_score(prediction, ground_truth):
    return normalize_answer(prediction) == normalize_answer(ground_truth)


def calculate_exact_match(output_lns: List[str], reference_lns: List[str]) -> Dict:
    assert len(output_lns) == len(reference_lns)
    em = 0
    for hypo, pred in zip(output_lns, reference_lns):
        em += exact_match_score(hypo, pred)
    if len(output_lns) > 0:
        em /= len(output_lns)
    return {"em": em}
def is_rag_model(model_prefix):
    return model_prefix.startswith("rag")


def set_extra_model_params(extra_params, hparams, config):
    equivalent_param = {p: p for p in extra_params}
    # T5 models don't have `dropout` param, they have `dropout_rate` instead
    equivalent_param["dropout"] = "dropout_rate"
    for p in extra_params:
        if getattr(hparams, p, None):
            if not hasattr(config, p) and not hasattr(config, equivalent_param[p]):
                logger.info("config doesn't have a `{}` attribute".format(p))
                delattr(hparams, p)
                continue
            set_p = p if hasattr(config, p) else equivalent_param[p]
            setattr(config, set_p, getattr(hparams, p))
            delattr(hparams, p)
    return hparams, config
| 79 |
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from pathlib import Path
import torch
from ...utils import is_npu_available, is_xpu_available
from .config_args import ClusterConfig, default_json_config_file
from .config_utils import SubcommandHelpFormatter
description = "Create a default config file for Accelerate with only a few flags set."
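# Writes a minimal cluster config (compute environment, mixed precision, process count,
# distributed type) based on the accelerator hardware detected at runtime.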
def __lowercase ( a__="no" , a__ = default_json_config_file , a__ = False ) -> Union[str, Any]:
__SCREAMING_SNAKE_CASE = Path(a__ )
path.parent.mkdir(parents=a__ , exist_ok=a__ )
if path.exists():
print(
f"""Configuration already exists at {save_location}, will not override. Run `accelerate config` manually or pass a different `save_location`.""" )
return False
__SCREAMING_SNAKE_CASE = mixed_precision.lower()
if mixed_precision not in ["no", "fp16", "bf16", "fp8"]:
raise ValueError(
f"""`mixed_precision` should be one of 'no', 'fp16', 'bf16', or 'fp8'. Received {mixed_precision}""" )
__SCREAMING_SNAKE_CASE = {
'compute_environment': 'LOCAL_MACHINE',
'mixed_precision': mixed_precision,
}
if torch.cuda.is_available():
__SCREAMING_SNAKE_CASE = torch.cuda.device_count()
__SCREAMING_SNAKE_CASE = num_gpus
__SCREAMING_SNAKE_CASE = False
if num_gpus > 1:
__SCREAMING_SNAKE_CASE = 'MULTI_GPU'
else:
__SCREAMING_SNAKE_CASE = 'NO'
elif is_xpu_available() and use_xpu:
__SCREAMING_SNAKE_CASE = torch.xpu.device_count()
__SCREAMING_SNAKE_CASE = num_xpus
__SCREAMING_SNAKE_CASE = False
if num_xpus > 1:
__SCREAMING_SNAKE_CASE = 'MULTI_XPU'
else:
__SCREAMING_SNAKE_CASE = 'NO'
elif is_npu_available():
__SCREAMING_SNAKE_CASE = torch.npu.device_count()
__SCREAMING_SNAKE_CASE = num_npus
__SCREAMING_SNAKE_CASE = False
if num_npus > 1:
__SCREAMING_SNAKE_CASE = 'MULTI_NPU'
else:
__SCREAMING_SNAKE_CASE = 'NO'
else:
__SCREAMING_SNAKE_CASE = 0
__SCREAMING_SNAKE_CASE = True
__SCREAMING_SNAKE_CASE = 1
__SCREAMING_SNAKE_CASE = 'NO'
__SCREAMING_SNAKE_CASE = ClusterConfig(**a__ )
config.to_json_file(a__ )
return path
def default_command_parser(parser, parents):
    parser = parser.add_parser("default", parents=parents, help=description, formatter_class=SubcommandHelpFormatter)
    parser.add_argument(
        "--config_file",
        default=None,
        help=(
            "The path to use to store the config file. Will default to a file named default_config.yaml in the cache "
            "location, which is the content of the environment `HF_HOME` suffixed with 'accelerate', or if you don't have "
            "such an environment variable, your cache directory ('~/.cache' or the content of `XDG_CACHE_HOME`) suffixed "
            "with 'huggingface'."
        ),
        dest="save_location",
    )
    parser.add_argument(
        "--mixed_precision",
        choices=["no", "fp16", "bf16"],
        type=str,
        help="Whether or not to use mixed precision training. "
        "Choose between FP16 and BF16 (bfloat16) training. "
        "BF16 training is only supported on Nvidia Ampere GPUs and PyTorch 1.10 or later.",
        default="no",
    )
    parser.set_defaults(func=default_config_command)
    return parser
def default_config_command(args):
    config_file = write_basic_config(args.mixed_precision, args.save_location)
    if config_file:
        print(f"accelerate configuration saved at {config_file}")
| 148 | 0 |
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)
DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"distilbert-base-uncased": "https://huggingface.co/distilbert-base-uncased/resolve/main/config.json",
"distilbert-base-uncased-distilled-squad": (
"https://huggingface.co/distilbert-base-uncased-distilled-squad/resolve/main/config.json"
),
"distilbert-base-cased": "https://huggingface.co/distilbert-base-cased/resolve/main/config.json",
"distilbert-base-cased-distilled-squad": (
"https://huggingface.co/distilbert-base-cased-distilled-squad/resolve/main/config.json"
),
"distilbert-base-german-cased": "https://huggingface.co/distilbert-base-german-cased/resolve/main/config.json",
"distilbert-base-multilingual-cased": (
"https://huggingface.co/distilbert-base-multilingual-cased/resolve/main/config.json"
),
"distilbert-base-uncased-finetuned-sst-2-english": (
"https://huggingface.co/distilbert-base-uncased-finetuned-sst-2-english/resolve/main/config.json"
),
}
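# DistilBERT keeps BERT-style hyperparameters under its own names (dim, n_layers, n_heads);
# the attribute_map below lets code written against the common names keep working.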
class DistilBertConfig(PretrainedConfig):
    model_type = "distilbert"
    attribute_map = {
        "hidden_size": "dim",
        "num_attention_heads": "n_heads",
        "num_hidden_layers": "n_layers",
    }

    def __init__(
        self,
        vocab_size=30_522,
        max_position_embeddings=512,
        sinusoidal_pos_embds=False,
        n_layers=6,
        n_heads=12,
        dim=768,
        hidden_dim=4 * 768,
        dropout=0.1,
        attention_dropout=0.1,
        activation="gelu",
        initializer_range=0.02,
        qa_dropout=0.1,
        seq_classif_dropout=0.2,
        pad_token_id=0,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.sinusoidal_pos_embds = sinusoidal_pos_embds
        self.n_layers = n_layers
        self.n_heads = n_heads
        self.dim = dim
        self.hidden_dim = hidden_dim
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation = activation
        self.initializer_range = initializer_range
        self.qa_dropout = qa_dropout
        self.seq_classif_dropout = seq_classif_dropout
        super().__init__(**kwargs, pad_token_id=pad_token_id)
class DistilBertOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ]
        )
| 583 |
import argparse
import json
from collections import OrderedDict
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import PoolFormerConfig, PoolFormerForImageClassification, PoolFormerImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
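# Conversion flow: remap timm-style PoolFormer checkpoint keys to the HF layout,
# rebuild the model, then sanity-check the logits on the standard COCO cats test image.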
def replace_key_with_offset(key, offset, original_name, new_name):
    """Replace the key by subtracting the offset from the original layer number."""
    to_find = original_name.split(".")[0]
    key_list = key.split(".")
    orig_block_num = int(key_list[key_list.index(to_find) - 2])
    layer_num = int(key_list[key_list.index(to_find) - 1])
    new_block_num = orig_block_num - offset
    key = key.replace(f"{orig_block_num}.{layer_num}.{original_name}", f"block.{new_block_num}.{layer_num}.{new_name}")
    return key
def rename_keys(state_dict):
    new_state_dict = OrderedDict()
    total_embed_found, patch_emb_offset = 0, 0
    for key, value in state_dict.items():
        if key.startswith("network"):
            key = key.replace("network", "poolformer.encoder")
        if "proj" in key:
            # Works for the first embedding as well as the internal embedding layers
            if key.endswith("bias") and "patch_embed" not in key:
                patch_emb_offset += 1
            to_replace = key[: key.find("proj")]
            key = key.replace(to_replace, f"patch_embeddings.{total_embed_found}.")
            key = key.replace("proj", "projection")
            if key.endswith("bias"):
                total_embed_found += 1
        if "patch_embeddings" in key:
            key = "poolformer.encoder." + key
        if "mlp.fc1" in key:
            key = replace_key_with_offset(key, patch_emb_offset, "mlp.fc1", "output.conv1")
        if "mlp.fc2" in key:
            key = replace_key_with_offset(key, patch_emb_offset, "mlp.fc2", "output.conv2")
        if "norm1" in key:
            key = replace_key_with_offset(key, patch_emb_offset, "norm1", "before_norm")
        if "norm2" in key:
            key = replace_key_with_offset(key, patch_emb_offset, "norm2", "after_norm")
        if "layer_scale_1" in key:
            key = replace_key_with_offset(key, patch_emb_offset, "layer_scale_1", "layer_scale_1")
        if "layer_scale_2" in key:
            key = replace_key_with_offset(key, patch_emb_offset, "layer_scale_2", "layer_scale_2")
        if "head" in key:
            key = key.replace("head", "classifier")
        new_state_dict[key] = value
    return new_state_dict
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    image = Image.open(requests.get(url, stream=True).raw)
    return image
@torch.no_grad()
def convert_poolformer_checkpoint(model_name, checkpoint_path, pytorch_dump_folder_path):
    config = PoolFormerConfig()

    # set attributes based on model_name
    repo_id = "huggingface/label-files"
    size = model_name[-3:]
    config.num_labels = 1000
    filename = "imagenet-1k-id2label.json"
    expected_shape = (1, 1000)

    # set config attributes
    idalabel = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    idalabel = {int(k): v for k, v in idalabel.items()}
    config.id2label = idalabel
    config.label2id = {v: k for k, v in idalabel.items()}
    if size == "s12":
        config.depths = [2, 2, 6, 2]
        config.hidden_sizes = [64, 128, 320, 512]
        config.mlp_ratio = 4.0
        crop_pct = 0.9
    elif size == "s24":
        config.depths = [4, 4, 12, 4]
        config.hidden_sizes = [64, 128, 320, 512]
        config.mlp_ratio = 4.0
        crop_pct = 0.9
    elif size == "s36":
        config.depths = [6, 6, 18, 6]
        config.hidden_sizes = [64, 128, 320, 512]
        config.mlp_ratio = 4.0
        config.layer_scale_init_value = 1e-6
        crop_pct = 0.9
    elif size == "m36":
        config.depths = [6, 6, 18, 6]
        config.hidden_sizes = [96, 192, 384, 768]
        config.mlp_ratio = 4.0
        config.layer_scale_init_value = 1e-6
        crop_pct = 0.95
    elif size == "m48":
        config.depths = [8, 8, 24, 8]
        config.hidden_sizes = [96, 192, 384, 768]
        config.mlp_ratio = 4.0
        config.layer_scale_init_value = 1e-6
        crop_pct = 0.95
    else:
        raise ValueError(f"Size {size} not supported")

    # load image processor
    image_processor = PoolFormerImageProcessor(crop_pct=crop_pct)

    # Prepare image
    image = prepare_img()
    pixel_values = image_processor(images=image, return_tensors="pt").pixel_values

    logger.info(f"Converting model {model_name}...")

    # load original state dict
    state_dict = torch.load(checkpoint_path, map_location=torch.device("cpu"))

    # rename keys
    state_dict = rename_keys(state_dict)

    # create HuggingFace model and load state dict
    model = PoolFormerForImageClassification(config)
    model.load_state_dict(state_dict)
    model.eval()

    # forward pass
    outputs = model(pixel_values)
    logits = outputs.logits

    # define expected logit slices for different models
    if size == "s12":
        expected_slice = torch.tensor([-0.3045, -0.6758, -0.4869])
    elif size == "s24":
        expected_slice = torch.tensor([0.4402, -0.1374, -0.8045])
    elif size == "s36":
        expected_slice = torch.tensor([-0.6080, -0.5133, -0.5898])
    elif size == "m36":
        expected_slice = torch.tensor([0.3952, 0.2263, -1.2668])
    elif size == "m48":
        expected_slice = torch.tensor([0.1167, -0.0656, -0.3423])
    else:
        raise ValueError(f"Size {size} not supported")

    # verify logits
    assert logits.shape == expected_shape
    assert torch.allclose(logits[0, :3], expected_slice, atol=1e-2)

    # finally, save model and image processor
    logger.info(f"Saving PyTorch model and image processor to {pytorch_dump_folder_path}...")
    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    model.save_pretrained(pytorch_dump_folder_path)
    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
"--model_name",
default="poolformer_s12",
type=str,
help="Name of the model you'd like to convert.",
)
parser.add_argument(
"--checkpoint_path", default=None, type=str, help="Path to the original PyTorch checkpoint (.pth file)."
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the folder to output PyTorch model."
)
    args = parser.parse_args()
convert_poolformer_checkpoint(args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path)
| 583 | 1 |
"""simple docstring"""
import string
# frequency taken from https://en.wikipedia.org/wiki/Letter_frequency
_lowerCAmelCase = {
"""E""": 12.70,
"""T""": 9.06,
"""A""": 8.17,
"""O""": 7.51,
"""I""": 6.97,
"""N""": 6.75,
"""S""": 6.33,
"""H""": 6.09,
"""R""": 5.99,
"""D""": 4.25,
"""L""": 4.03,
"""C""": 2.78,
"""U""": 2.76,
"""M""": 2.41,
"""W""": 2.36,
"""F""": 2.23,
"""G""": 2.02,
"""Y""": 1.97,
"""P""": 1.93,
"""B""": 1.29,
"""V""": 0.98,
"""K""": 0.77,
"""J""": 0.15,
"""X""": 0.15,
"""Q""": 0.10,
"""Z""": 0.07,
}
_lowerCAmelCase = """ETAOINSHRDLCUMWFGYPBVKJXQZ"""
_lowerCAmelCase = """ABCDEFGHIJKLMNOPQRSTUVWXYZ"""
def get_letter_count(message: str) -> dict[str, int]:
    letter_count = {letter: 0 for letter in string.ascii_uppercase}
    for letter in message.upper():
        if letter in LETTERS:
            letter_count[letter] += 1
    return letter_count


def get_item_at_index_zero(x: tuple) -> str:
    return x[0]
def get_frequency_order(message: str) -> str:
    letter_to_freq = get_letter_count(message)
    freq_to_letter: dict[int, list[str]] = {
        freq: [] for letter, freq in letter_to_freq.items()
    }
    for letter in LETTERS:
        freq_to_letter[letter_to_freq[letter]].append(letter)

    freq_to_letter_str: dict[int, str] = {}

    for freq in freq_to_letter:
        freq_to_letter[freq].sort(key=ETAOIN.find, reverse=True)
        freq_to_letter_str[freq] = "".join(freq_to_letter[freq])

    freq_pairs = list(freq_to_letter_str.items())
    freq_pairs.sort(key=get_item_at_index_zero, reverse=True)

    freq_order: list[str] = [freq_pair[1] for freq_pair in freq_pairs]

    return "".join(freq_order)
def english_freq_match_score(message: str) -> int:
    freq_order = get_frequency_order(message)
    match_score = 0
    for common_letter in ETAOIN[:6]:
        if common_letter in freq_order[:6]:
            match_score += 1

    for uncommon_letter in ETAOIN[-6:]:
        if uncommon_letter in freq_order[-6:]:
            match_score += 1

    return match_score
if __name__ == "__main__":
import doctest
doctest.testmod()
| 259 |
"""simple docstring"""
def least_divisible_repunit(divisor: int) -> int:
    if divisor % 5 == 0 or divisor % 2 == 0:
        return 0
    repunit = 1
    repunit_index = 1
    while repunit:
        repunit = (10 * repunit + 1) % divisor
        repunit_index += 1
    return repunit_index


def solution(limit: int = 1_000_000) -> int:
    divisor = limit - 1
    if divisor % 2 == 0:
        divisor += 1
    while least_divisible_repunit(divisor) <= limit:
        divisor += 2
    return divisor
return divisor
if __name__ == "__main__":
print(F'''{solution() = }''')
| 259 | 1 |
import gc
import random
import unittest
import numpy as np
import torch
from transformers import XLMRobertaTokenizer
from diffusers import (
AltDiffusionImgaImgPipeline,
AutoencoderKL,
PNDMScheduler,
UNetaDConditionModel,
)
from diffusers.image_processor import VaeImageProcessor
from diffusers.pipelines.alt_diffusion.modeling_roberta_series import (
RobertaSeriesConfig,
RobertaSeriesModelWithTransformation,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
enable_full_determinism()
class AltDiffusionImgaImgPipelineFastTests( unittest.TestCase ):
    def tearDown(self):
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
@property
    def dummy_image(self):
"""simple docstring"""
__lowercase = 1
__lowercase = 3
__lowercase = (32, 32)
__lowercase = floats_tensor((batch_size, num_channels) + sizes , rng=random.Random(0 ) ).to(_UpperCAmelCase )
return image
@property
    def dummy_cond_unet(self):
"""simple docstring"""
torch.manual_seed(0 )
__lowercase = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D') , up_block_types=('CrossAttnUpBlock2D', 'UpBlock2D') , cross_attention_dim=32 , )
return model
@property
    def dummy_vae(self):
"""simple docstring"""
torch.manual_seed(0 )
__lowercase = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'] , up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'] , latent_channels=4 , )
return model
@property
    def dummy_text_encoder(self):
"""simple docstring"""
torch.manual_seed(0 )
__lowercase = RobertaSeriesConfig(
hidden_size=32 , project_dim=32 , intermediate_size=37 , layer_norm_eps=1e-0_5 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=50_06 , )
return RobertaSeriesModelWithTransformation(_UpperCAmelCase )
@property
    def dummy_extractor(self):
"""simple docstring"""
def extract(*_UpperCAmelCase : str , **_UpperCAmelCase : Optional[Any] ):
class A__ :
def __init__( self : Optional[Any] ) -> str:
"""simple docstring"""
__lowercase = torch.ones([0] )
                def to(self, device):
                    self.pixel_values.to(device)
                    return self
return Out()
return extract
    def test_stable_diffusion_img2img_default_case(self):
"""simple docstring"""
__lowercase = 'cpu' # ensure determinism for the device-dependent torch.Generator
__lowercase = self.dummy_cond_unet
__lowercase = PNDMScheduler(skip_prk_steps=_UpperCAmelCase )
__lowercase = self.dummy_vae
__lowercase = self.dummy_text_encoder
__lowercase = XLMRobertaTokenizer.from_pretrained('hf-internal-testing/tiny-xlm-roberta' )
__lowercase = 77
__lowercase = self.dummy_image.to(_UpperCAmelCase )
__lowercase = init_image / 2 + 0.5
# make sure here that pndm scheduler skips prk
__lowercase = AltDiffusionImgaImgPipeline(
unet=_UpperCAmelCase , scheduler=_UpperCAmelCase , vae=_UpperCAmelCase , text_encoder=_UpperCAmelCase , tokenizer=_UpperCAmelCase , safety_checker=_UpperCAmelCase , feature_extractor=self.dummy_extractor , )
__lowercase = VaeImageProcessor(vae_scale_factor=alt_pipe.vae_scale_factor , do_normalize=_UpperCAmelCase )
__lowercase = alt_pipe.to(_UpperCAmelCase )
alt_pipe.set_progress_bar_config(disable=_UpperCAmelCase )
__lowercase = 'A painting of a squirrel eating a burger'
__lowercase = torch.Generator(device=_UpperCAmelCase ).manual_seed(0 )
__lowercase = alt_pipe(
[prompt] , generator=_UpperCAmelCase , guidance_scale=6.0 , num_inference_steps=2 , output_type='np' , image=_UpperCAmelCase , )
__lowercase = output.images
__lowercase = torch.Generator(device=_UpperCAmelCase ).manual_seed(0 )
__lowercase = alt_pipe(
[prompt] , generator=_UpperCAmelCase , guidance_scale=6.0 , num_inference_steps=2 , output_type='np' , image=_UpperCAmelCase , return_dict=_UpperCAmelCase , )[0]
__lowercase = image[0, -3:, -3:, -1]
__lowercase = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 32, 32, 3)
__lowercase = np.array([0.4_427, 0.3_731, 0.4_249, 0.4_941, 0.4_546, 0.4_148, 0.4_193, 0.4_666, 0.4_499] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 5e-3
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 5e-3
@unittest.skipIf(torch_device != 'cuda' , 'This test requires a GPU' )
def a__ ( self : List[str] ) -> str:
"""simple docstring"""
__lowercase = self.dummy_cond_unet
__lowercase = PNDMScheduler(skip_prk_steps=_UpperCAmelCase )
__lowercase = self.dummy_vae
__lowercase = self.dummy_text_encoder
__lowercase = XLMRobertaTokenizer.from_pretrained('hf-internal-testing/tiny-xlm-roberta' )
__lowercase = 77
__lowercase = self.dummy_image.to(_UpperCAmelCase )
# put models in fp16
__lowercase = unet.half()
__lowercase = vae.half()
__lowercase = bert.half()
# make sure here that pndm scheduler skips prk
__lowercase = AltDiffusionImgaImgPipeline(
unet=_UpperCAmelCase , scheduler=_UpperCAmelCase , vae=_UpperCAmelCase , text_encoder=_UpperCAmelCase , tokenizer=_UpperCAmelCase , safety_checker=_UpperCAmelCase , feature_extractor=self.dummy_extractor , )
__lowercase = VaeImageProcessor(vae_scale_factor=alt_pipe.vae_scale_factor , do_normalize=_UpperCAmelCase )
__lowercase = alt_pipe.to(_UpperCAmelCase )
alt_pipe.set_progress_bar_config(disable=_UpperCAmelCase )
__lowercase = 'A painting of a squirrel eating a burger'
__lowercase = torch.manual_seed(0 )
__lowercase = alt_pipe(
[prompt] , generator=_UpperCAmelCase , num_inference_steps=2 , output_type='np' , image=_UpperCAmelCase , ).images
assert image.shape == (1, 32, 32, 3)
@unittest.skipIf(torch_device != 'cuda' , 'This test requires a GPU' )
    def test_stable_diffusion_img2img_pipeline_multiple_of_8(self):
"""simple docstring"""
__lowercase = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/img2img/sketch-mountains-input.jpg' )
# resize to resolution that is divisible by 8 but not 16 or 32
__lowercase = init_image.resize((7_60, 5_04) )
__lowercase = 'BAAI/AltDiffusion'
__lowercase = AltDiffusionImgaImgPipeline.from_pretrained(
_UpperCAmelCase , safety_checker=_UpperCAmelCase , )
pipe.to(_UpperCAmelCase )
pipe.set_progress_bar_config(disable=_UpperCAmelCase )
pipe.enable_attention_slicing()
__lowercase = 'A fantasy landscape, trending on artstation'
__lowercase = torch.manual_seed(0 )
__lowercase = pipe(
prompt=_UpperCAmelCase , image=_UpperCAmelCase , strength=0.75 , guidance_scale=7.5 , generator=_UpperCAmelCase , output_type='np' , )
__lowercase = output.images[0]
__lowercase = image[2_55:2_58, 3_83:3_86, -1]
assert image.shape == (5_04, 7_60, 3)
__lowercase = np.array([0.9_358, 0.9_397, 0.9_599, 0.9_901, 1.0_000, 1.0_000, 0.9_882, 1.0_000, 1.0_000] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
@slow
@require_torch_gpu
class AltDiffusionImgaImgPipelineIntegrationTests( unittest.TestCase ):
    def tearDown(self):
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
    def test_stable_diffusion_img2img_pipeline_default(self):
"""simple docstring"""
__lowercase = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/img2img/sketch-mountains-input.jpg' )
__lowercase = init_image.resize((7_68, 5_12) )
__lowercase = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/img2img/fantasy_landscape_alt.npy' )
__lowercase = 'BAAI/AltDiffusion'
__lowercase = AltDiffusionImgaImgPipeline.from_pretrained(
_UpperCAmelCase , safety_checker=_UpperCAmelCase , )
pipe.to(_UpperCAmelCase )
pipe.set_progress_bar_config(disable=_UpperCAmelCase )
pipe.enable_attention_slicing()
__lowercase = 'A fantasy landscape, trending on artstation'
__lowercase = torch.manual_seed(0 )
__lowercase = pipe(
prompt=_UpperCAmelCase , image=_UpperCAmelCase , strength=0.75 , guidance_scale=7.5 , generator=_UpperCAmelCase , output_type='np' , )
__lowercase = output.images[0]
assert image.shape == (5_12, 7_68, 3)
# img2img is flaky across GPUs even in fp32, so using MAE here
assert np.abs(expected_image - image ).max() < 1e-2
| 714 |
import string
from math import log10
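# tf-idf = term frequency * inverse document frequency, with
# idf = log10(N / df) (or the smoothed variant 1 + log10(N / (1 + df))).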
def term_frequency(term: str, document: str) -> int:
    """Return the number of times `term` appears in `document`."""
    document_without_punctuation = document.translate(
        str.maketrans("", "", string.punctuation)
    ).replace("\n", "")
    tokenize_document = document_without_punctuation.split(" ")  # word tokenization
    return len([word for word in tokenize_document if word.lower() == term.lower()])


def document_frequency(term: str, corpus: str) -> tuple[int, int]:
    """Return (number of documents containing `term`, total number of documents)."""
    corpus_without_punctuation = corpus.lower().translate(
        str.maketrans("", "", string.punctuation)
    )  # strip all punctuation and replace it with ''
    docs = corpus_without_punctuation.split("\n")
    term = term.lower()
    return (len([doc for doc in docs if term in doc]), len(docs))


def inverse_document_frequency(df: int, n: int, smoothing=False) -> float:
    if smoothing:
        if n == 0:
            raise ValueError("log10(0) is undefined.")
        return round(1 + log10(n / (1 + df)), 3)

    if df == 0:
        raise ZeroDivisionError("df must be > 0")
    elif n == 0:
        raise ValueError("log10(0) is undefined.")
    return round(log10(n / df), 3)


def tf_idf(tf: int, idf: int) -> float:
    return round(tf * idf, 3)
| 688 | 0 |
from __future__ import annotations
import copy
import tempfile
import unittest
from transformers import CONFIG_MAPPING, AutoConfig, BertConfig, GPTaConfig, TaConfig, TapasConfig, is_tf_available
from transformers.testing_utils import (
DUMMY_UNKNOWN_IDENTIFIER,
SMALL_MODEL_IDENTIFIER,
RequestCounter,
require_tensorflow_probability,
require_tf,
slow,
)
from ..bert.test_modeling_bert import BertModelTester
if is_tf_available():
from transformers import (
TFAutoModel,
TFAutoModelForCausalLM,
TFAutoModelForMaskedLM,
TFAutoModelForPreTraining,
TFAutoModelForQuestionAnswering,
TFAutoModelForSeqaSeqLM,
TFAutoModelForSequenceClassification,
TFAutoModelForTableQuestionAnswering,
TFAutoModelForTokenClassification,
TFAutoModelWithLMHead,
TFBertForMaskedLM,
TFBertForPreTraining,
TFBertForQuestionAnswering,
TFBertForSequenceClassification,
TFBertModel,
TFFunnelBaseModel,
TFFunnelModel,
TFGPTaLMHeadModel,
TFRobertaForMaskedLM,
TFTaForConditionalGeneration,
TFTapasForQuestionAnswering,
)
from transformers.models.auto.modeling_tf_auto import (
TF_MODEL_FOR_CAUSAL_LM_MAPPING,
TF_MODEL_FOR_MASKED_LM_MAPPING,
TF_MODEL_FOR_PRETRAINING_MAPPING,
TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING,
TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING,
TF_MODEL_MAPPING,
)
from transformers.models.bert.modeling_tf_bert import TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST
from transformers.models.gpta.modeling_tf_gpta import TF_GPT2_PRETRAINED_MODEL_ARCHIVE_LIST
from transformers.models.ta.modeling_tf_ta import TF_T5_PRETRAINED_MODEL_ARCHIVE_LIST
from transformers.models.tapas.modeling_tf_tapas import TF_TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST
class NewModelConfig(BertConfig):
    model_type = "new-model"


if is_tf_available():

    class TFNewModel(TFBertModel):
        config_class = NewModelConfig
@require_tf
class TFAutoModelTest( unittest.TestCase ):
@slow
    def test_model_from_pretrained(self):
        model_name = "bert-base-cased"
        config = AutoConfig.from_pretrained(model_name)
        self.assertIsNotNone(config)
        self.assertIsInstance(config, BertConfig)

        model = TFAutoModel.from_pretrained(model_name)
        self.assertIsNotNone(model)
        self.assertIsInstance(model, TFBertModel)
@slow
def a ( self : Dict ):
"""simple docstring"""
__UpperCamelCase : Optional[Any] = """bert-base-cased"""
__UpperCamelCase : Dict = AutoConfig.from_pretrained(lowerCamelCase__ )
self.assertIsNotNone(lowerCamelCase__ )
self.assertIsInstance(lowerCamelCase__ , lowerCamelCase__ )
__UpperCamelCase : str = TFAutoModelForPreTraining.from_pretrained(lowerCamelCase__ )
self.assertIsNotNone(lowerCamelCase__ )
self.assertIsInstance(lowerCamelCase__ , lowerCamelCase__ )
@slow
def a ( self : List[Any] ):
"""simple docstring"""
for model_name in TF_GPT2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__UpperCamelCase : List[Any] = AutoConfig.from_pretrained(lowerCamelCase__ )
self.assertIsNotNone(lowerCamelCase__ )
self.assertIsInstance(lowerCamelCase__ , lowerCamelCase__ )
__UpperCamelCase : Optional[Any] = TFAutoModelForCausalLM.from_pretrained(lowerCamelCase__ )
__UpperCamelCase , __UpperCamelCase : List[Any] = TFAutoModelForCausalLM.from_pretrained(lowerCamelCase__ , output_loading_info=lowerCamelCase__ )
self.assertIsNotNone(lowerCamelCase__ )
self.assertIsInstance(lowerCamelCase__ , lowerCamelCase__ )
@slow
def a ( self : Optional[int] ):
"""simple docstring"""
for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__UpperCamelCase : Optional[int] = AutoConfig.from_pretrained(lowerCamelCase__ )
self.assertIsNotNone(lowerCamelCase__ )
self.assertIsInstance(lowerCamelCase__ , lowerCamelCase__ )
__UpperCamelCase : int = TFAutoModelWithLMHead.from_pretrained(lowerCamelCase__ )
self.assertIsNotNone(lowerCamelCase__ )
self.assertIsInstance(lowerCamelCase__ , lowerCamelCase__ )
@slow
def a ( self : Optional[int] ):
"""simple docstring"""
for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__UpperCamelCase : List[Any] = AutoConfig.from_pretrained(lowerCamelCase__ )
self.assertIsNotNone(lowerCamelCase__ )
self.assertIsInstance(lowerCamelCase__ , lowerCamelCase__ )
__UpperCamelCase : Any = TFAutoModelForMaskedLM.from_pretrained(lowerCamelCase__ )
__UpperCamelCase , __UpperCamelCase : List[str] = TFAutoModelForMaskedLM.from_pretrained(lowerCamelCase__ , output_loading_info=lowerCamelCase__ )
self.assertIsNotNone(lowerCamelCase__ )
self.assertIsInstance(lowerCamelCase__ , lowerCamelCase__ )
@slow
def a ( self : Union[str, Any] ):
"""simple docstring"""
for model_name in TF_T5_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__UpperCamelCase : Union[str, Any] = AutoConfig.from_pretrained(lowerCamelCase__ )
self.assertIsNotNone(lowerCamelCase__ )
self.assertIsInstance(lowerCamelCase__ , lowerCamelCase__ )
__UpperCamelCase : List[Any] = TFAutoModelForSeqaSeqLM.from_pretrained(lowerCamelCase__ )
__UpperCamelCase , __UpperCamelCase : Optional[Any] = TFAutoModelForSeqaSeqLM.from_pretrained(lowerCamelCase__ , output_loading_info=lowerCamelCase__ )
self.assertIsNotNone(lowerCamelCase__ )
self.assertIsInstance(lowerCamelCase__ , lowerCamelCase__ )
@slow
def a ( self : int ):
"""simple docstring"""
for model_name in ["bert-base-uncased"]:
__UpperCamelCase : List[Any] = AutoConfig.from_pretrained(lowerCamelCase__ )
self.assertIsNotNone(lowerCamelCase__ )
self.assertIsInstance(lowerCamelCase__ , lowerCamelCase__ )
__UpperCamelCase : Dict = TFAutoModelForSequenceClassification.from_pretrained(lowerCamelCase__ )
self.assertIsNotNone(lowerCamelCase__ )
self.assertIsInstance(lowerCamelCase__ , lowerCamelCase__ )
@slow
def a ( self : Any ):
"""simple docstring"""
for model_name in ["bert-base-uncased"]:
__UpperCamelCase : Union[str, Any] = AutoConfig.from_pretrained(lowerCamelCase__ )
self.assertIsNotNone(lowerCamelCase__ )
self.assertIsInstance(lowerCamelCase__ , lowerCamelCase__ )
__UpperCamelCase : List[Any] = TFAutoModelForQuestionAnswering.from_pretrained(lowerCamelCase__ )
self.assertIsNotNone(lowerCamelCase__ )
self.assertIsInstance(lowerCamelCase__ , lowerCamelCase__ )
@slow
@require_tensorflow_probability
def a ( self : List[Any] ):
"""simple docstring"""
for model_name in TF_TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST[5:6]:
__UpperCamelCase : Any = AutoConfig.from_pretrained(lowerCamelCase__ )
self.assertIsNotNone(lowerCamelCase__ )
self.assertIsInstance(lowerCamelCase__ , lowerCamelCase__ )
__UpperCamelCase : Tuple = TFAutoModelForTableQuestionAnswering.from_pretrained(lowerCamelCase__ )
__UpperCamelCase , __UpperCamelCase : Optional[int] = TFAutoModelForTableQuestionAnswering.from_pretrained(
lowerCamelCase__ , output_loading_info=lowerCamelCase__ )
self.assertIsNotNone(lowerCamelCase__ )
self.assertIsInstance(lowerCamelCase__ , lowerCamelCase__ )
    def test_from_pretrained_identifier(self):
"""simple docstring"""
        model = TFAutoModelWithLMHead.from_pretrained(SMALL_MODEL_IDENTIFIER)
        self.assertIsInstance(model, TFBertForMaskedLM)
        self.assertEqual(model.num_parameters(), 14_410)
        self.assertEqual(model.num_parameters(only_trainable=True), 14_410)
    def test_from_identifier_from_model_type(self):
"""simple docstring"""
        model = TFAutoModelWithLMHead.from_pretrained(DUMMY_UNKNOWN_IDENTIFIER)
        self.assertIsInstance(model, TFRobertaForMaskedLM)
        self.assertEqual(model.num_parameters(), 14_410)
        self.assertEqual(model.num_parameters(only_trainable=True), 14_410)
def a ( self : Union[str, Any] ):
"""simple docstring"""
__UpperCamelCase : str = TFAutoModel.from_pretrained("""sgugger/funnel-random-tiny""" )
self.assertIsInstance(lowerCamelCase__ , lowerCamelCase__ )
__UpperCamelCase : Union[str, Any] = copy.deepcopy(model.config )
__UpperCamelCase : List[Any] = ["""FunnelBaseModel"""]
__UpperCamelCase : Optional[int] = TFAutoModel.from_config(lowerCamelCase__ )
self.assertIsInstance(lowerCamelCase__ , lowerCamelCase__ )
with tempfile.TemporaryDirectory() as tmp_dir:
model.save_pretrained(lowerCamelCase__ )
__UpperCamelCase : str = TFAutoModel.from_pretrained(lowerCamelCase__ )
self.assertIsInstance(lowerCamelCase__ , lowerCamelCase__ )
def a ( self : Dict ):
"""simple docstring"""
try:
AutoConfig.register("""new-model""" , lowerCamelCase__ )
__UpperCamelCase : List[str] = [
TFAutoModel,
TFAutoModelForCausalLM,
TFAutoModelForMaskedLM,
TFAutoModelForPreTraining,
TFAutoModelForQuestionAnswering,
TFAutoModelForSequenceClassification,
TFAutoModelForTokenClassification,
]
for auto_class in auto_classes:
with self.subTest(auto_class.__name__ ):
# Wrong config class will raise an error
with self.assertRaises(lowerCamelCase__ ):
auto_class.register(lowerCamelCase__ , lowerCamelCase__ )
auto_class.register(lowerCamelCase__ , lowerCamelCase__ )
# Trying to register something existing in the Transformers library will raise an error
with self.assertRaises(lowerCamelCase__ ):
auto_class.register(lowerCamelCase__ , lowerCamelCase__ )
# Now that the config is registered, it can be used as any other config with the auto-API
__UpperCamelCase : Dict = BertModelTester(self ).get_config()
__UpperCamelCase : Optional[Any] = NewModelConfig(**tiny_config.to_dict() )
__UpperCamelCase : Optional[int] = auto_class.from_config(lowerCamelCase__ )
self.assertIsInstance(lowerCamelCase__ , lowerCamelCase__ )
with tempfile.TemporaryDirectory() as tmp_dir:
model.save_pretrained(lowerCamelCase__ )
__UpperCamelCase : Union[str, Any] = auto_class.from_pretrained(lowerCamelCase__ )
self.assertIsInstance(lowerCamelCase__ , lowerCamelCase__ )
finally:
if "new-model" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["new-model"]
for mapping in (
TF_MODEL_MAPPING,
TF_MODEL_FOR_PRETRAINING_MAPPING,
TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING,
TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING,
TF_MODEL_FOR_CAUSAL_LM_MAPPING,
TF_MODEL_FOR_MASKED_LM_MAPPING,
):
if NewModelConfig in mapping._extra_content:
del mapping._extra_content[NewModelConfig]
def a ( self : Tuple ):
"""simple docstring"""
with self.assertRaisesRegex(
lowerCamelCase__ , """bert-base is not a local folder and is not a valid model identifier""" ):
__UpperCamelCase : Optional[int] = TFAutoModel.from_pretrained("""bert-base""" )
def a ( self : List[Any] ):
"""simple docstring"""
with self.assertRaisesRegex(
lowerCamelCase__ , R"""aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)""" ):
__UpperCamelCase : Optional[Any] = TFAutoModel.from_pretrained(lowerCamelCase__ , revision="""aaaaaa""" )
def a ( self : Tuple ):
"""simple docstring"""
with self.assertRaisesRegex(
lowerCamelCase__ , """hf-internal-testing/config-no-model does not appear to have a file named pytorch_model.bin""" , ):
__UpperCamelCase : List[Any] = TFAutoModel.from_pretrained("""hf-internal-testing/config-no-model""" )
def a ( self : List[Any] ):
"""simple docstring"""
with self.assertRaisesRegex(lowerCamelCase__ , """Use `from_pt=True` to load this model""" ):
__UpperCamelCase : Union[str, Any] = TFAutoModel.from_pretrained("""hf-internal-testing/tiny-bert-pt-only""" )
def a ( self : List[Any] ):
"""simple docstring"""
__UpperCamelCase : str = TFAutoModel.from_pretrained("""hf-internal-testing/tiny-random-bert""" )
with RequestCounter() as counter:
__UpperCamelCase : Optional[int] = TFAutoModel.from_pretrained("""hf-internal-testing/tiny-random-bert""" )
self.assertEqual(counter.get_request_count , 0 )
self.assertEqual(counter.head_request_count , 1 )
self.assertEqual(counter.other_request_count , 0 )
# With a sharded checkpoint
__UpperCamelCase : Union[str, Any] = TFAutoModel.from_pretrained("""ArthurZ/tiny-random-bert-sharded""" )
with RequestCounter() as counter:
__UpperCamelCase : List[Any] = TFAutoModel.from_pretrained("""ArthurZ/tiny-random-bert-sharded""" )
self.assertEqual(counter.get_request_count , 0 )
self.assertEqual(counter.head_request_count , 1 )
self.assertEqual(counter.other_request_count , 0 )
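# A minimal, standalone sketch of the register/round-trip flow exercised by the
# registration test above, using the AutoConfig API (the TF auto classes follow
# the same pattern). `MyTinyConfig` is a hypothetical config class, not part of
# the library; this assumes a working `transformers` install.
from transformers import AutoConfig, PretrainedConfig

class MyTinyConfig(PretrainedConfig):
    model_type = "my-tiny-model"  # hypothetical model type

AutoConfig.register("my-tiny-model", MyTinyConfig)
config = AutoConfig.for_model("my-tiny-model")
assert isinstance(config, MyTinyConfig)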
| 269 |
import argparse
from pathlib import Path
from typing import Dict, OrderedDict, Tuple
import torch
from audiocraft.models import MusicGen
from transformers import (
AutoFeatureExtractor,
AutoTokenizer,
EncodecModel,
MusicgenDecoderConfig,
MusicgenForConditionalGeneration,
MusicgenProcessor,
TaEncoderModel,
)
from transformers.models.musicgen.modeling_musicgen import MusicgenForCausalLM
from transformers.utils import logging
logging.set_verbosity_info()
UpperCamelCase = logging.get_logger(__name__)
UpperCamelCase = ['model.decoder.embed_positions.weights']
def __lowerCamelCase ( __lowerCAmelCase : Optional[int] ) -> Union[str, Any]:
if "emb" in name:
__UpperCamelCase : Any = name.replace("""emb""" , """model.decoder.embed_tokens""" )
if "transformer" in name:
__UpperCamelCase : str = name.replace("""transformer""" , """model.decoder""" )
if "cross_attention" in name:
__UpperCamelCase : List[str] = name.replace("""cross_attention""" , """encoder_attn""" )
if "linear1" in name:
__UpperCamelCase : Optional[int] = name.replace("""linear1""" , """fc1""" )
if "linear2" in name:
__UpperCamelCase : Optional[Any] = name.replace("""linear2""" , """fc2""" )
if "norm1" in name:
__UpperCamelCase : Tuple = name.replace("""norm1""" , """self_attn_layer_norm""" )
if "norm_cross" in name:
__UpperCamelCase : List[str] = name.replace("""norm_cross""" , """encoder_attn_layer_norm""" )
if "norm2" in name:
__UpperCamelCase : Tuple = name.replace("""norm2""" , """final_layer_norm""" )
if "out_norm" in name:
__UpperCamelCase : Union[str, Any] = name.replace("""out_norm""" , """model.decoder.layer_norm""" )
if "linears" in name:
__UpperCamelCase : Optional[int] = name.replace("""linears""" , """lm_heads""" )
if "condition_provider.conditioners.description.output_proj" in name:
__UpperCamelCase : Tuple = name.replace("""condition_provider.conditioners.description.output_proj""" , """enc_to_dec_proj""" )
return name
def __lowerCamelCase ( __lowerCAmelCase : OrderedDict , __lowerCAmelCase : int ) -> Tuple[Dict, Dict]:
__UpperCamelCase : Tuple = list(state_dict.keys() )
__UpperCamelCase : List[Any] = {}
for key in keys:
__UpperCamelCase : Optional[Any] = state_dict.pop(__lowerCAmelCase )
__UpperCamelCase : Dict = rename_keys(__lowerCAmelCase )
if "in_proj_weight" in key:
# split fused qkv proj
__UpperCamelCase : Optional[Any] = val[:hidden_size, :]
__UpperCamelCase : Optional[Any] = val[hidden_size : 2 * hidden_size, :]
__UpperCamelCase : List[str] = val[-hidden_size:, :]
elif "enc_to_dec_proj" in key:
__UpperCamelCase : Dict = val
else:
__UpperCamelCase : Any = val
return state_dict, enc_dec_proj_state_dict
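# A minimal, self-contained sketch of the fused-QKV split performed above:
# fairseq stores one (3 * hidden, hidden) `in_proj_weight`, while the HF model
# expects separate q/k/v projections, so the matrix is sliced row-wise.
import torch as _torch

_hidden = 4  # toy size for illustration
_in_proj = _torch.arange(3 * _hidden * _hidden, dtype=_torch.float32).reshape(3 * _hidden, _hidden)
_q = _in_proj[:_hidden, :]
_k = _in_proj[_hidden : 2 * _hidden, :]
_v = _in_proj[-_hidden:, :]
assert _torch.equal(_torch.cat([_q, _k, _v], dim=0), _in_proj)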
def __lowerCamelCase ( __lowerCAmelCase : str ) -> MusicgenDecoderConfig:
if checkpoint == "small":
# default config values
__UpperCamelCase : int = 1024
__UpperCamelCase : Union[str, Any] = 24
__UpperCamelCase : int = 16
elif checkpoint == "medium":
__UpperCamelCase : List[Any] = 1536
__UpperCamelCase : Dict = 48
__UpperCamelCase : Dict = 24
elif checkpoint == "large":
__UpperCamelCase : List[Any] = 2048
__UpperCamelCase : str = 48
__UpperCamelCase : Optional[int] = 32
else:
raise ValueError(f'Checkpoint should be one of `[\'small\', \'medium\', \'large\']`, got {checkpoint}.' )
__UpperCamelCase : Any = MusicgenDecoderConfig(
hidden_size=__lowerCAmelCase , ffn_dim=hidden_size * 4 , num_hidden_layers=__lowerCAmelCase , num_attention_heads=__lowerCAmelCase , )
return config
@torch.no_grad()
def __lowerCamelCase ( __lowerCAmelCase : List[str] , __lowerCAmelCase : Optional[Any]=None , __lowerCAmelCase : Tuple=None , __lowerCAmelCase : str="cpu" ) -> Optional[int]:
__UpperCamelCase : str = MusicGen.get_pretrained(__lowerCAmelCase , device=__lowerCAmelCase )
__UpperCamelCase : int = decoder_config_from_checkpoint(__lowerCAmelCase )
__UpperCamelCase : Optional[int] = fairseq_model.lm.state_dict()
__UpperCamelCase , __UpperCamelCase : Union[str, Any] = rename_state_dict(
__lowerCAmelCase , hidden_size=decoder_config.hidden_size )
__UpperCamelCase : List[Any] = TaEncoderModel.from_pretrained("""t5-base""" )
__UpperCamelCase : Dict = EncodecModel.from_pretrained("""facebook/encodec_32khz""" )
__UpperCamelCase : List[str] = MusicgenForCausalLM(__lowerCAmelCase ).eval()
# load all decoder weights - expect that we'll be missing embeddings and enc-dec projection
__UpperCamelCase , __UpperCamelCase : Tuple = decoder.load_state_dict(__lowerCAmelCase , strict=__lowerCAmelCase )
for key in missing_keys.copy():
if key.startswith(("""text_encoder""", """audio_encoder""") ) or key in EXPECTED_MISSING_KEYS:
missing_keys.remove(__lowerCAmelCase )
if len(__lowerCAmelCase ) > 0:
raise ValueError(f'Missing key(s) in state_dict: {missing_keys}' )
if len(__lowerCAmelCase ) > 0:
raise ValueError(f'Unexpected key(s) in state_dict: {unexpected_keys}' )
# init the composite model
__UpperCamelCase : Tuple = MusicgenForConditionalGeneration(text_encoder=__lowerCAmelCase , audio_encoder=__lowerCAmelCase , decoder=__lowerCAmelCase )
# load the pre-trained enc-dec projection (from the decoder state dict)
model.enc_to_dec_proj.load_state_dict(__lowerCAmelCase )
# check we can do a forward pass
__UpperCamelCase : int = torch.arange(0 , 8 , dtype=torch.long ).reshape(2 , -1 )
__UpperCamelCase : List[Any] = input_ids.reshape(2 * 4 , -1 )
with torch.no_grad():
__UpperCamelCase : int = model(input_ids=__lowerCAmelCase , decoder_input_ids=__lowerCAmelCase ).logits
if logits.shape != (8, 1, 2048):
raise ValueError("""Incorrect shape for logits""" )
# now construct the processor
__UpperCamelCase : Union[str, Any] = AutoTokenizer.from_pretrained("""t5-base""" )
__UpperCamelCase : List[Any] = AutoFeatureExtractor.from_pretrained("""facebook/encodec_32khz""" , padding_side="""left""" )
__UpperCamelCase : Any = MusicgenProcessor(feature_extractor=__lowerCAmelCase , tokenizer=__lowerCAmelCase )
# set the appropriate bos/pad token ids
__UpperCamelCase : Tuple = 2048
__UpperCamelCase : int = 2048
# set other default generation config params
__UpperCamelCase : str = int(30 * audio_encoder.config.frame_rate )
__UpperCamelCase : List[str] = True
__UpperCamelCase : Tuple = 3.0
if pytorch_dump_folder is not None:
Path(__lowerCAmelCase ).mkdir(exist_ok=__lowerCAmelCase )
logger.info(f'Saving model {checkpoint} to {pytorch_dump_folder}' )
model.save_pretrained(__lowerCAmelCase )
processor.save_pretrained(__lowerCAmelCase )
if repo_id:
logger.info(f'Pushing model {checkpoint} to {repo_id}' )
model.push_to_hub(__lowerCAmelCase )
processor.push_to_hub(__lowerCAmelCase )
if __name__ == "__main__":
UpperCamelCase = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--checkpoint',
default='small',
type=str,
help='Checkpoint size of the MusicGen model you\'d like to convert. Can be one of: `[\'small\', \'medium\', \'large\']`.',
)
parser.add_argument(
'--pytorch_dump_folder',
required=True,
default=None,
type=str,
help='Path to the output PyTorch model directory.',
)
parser.add_argument(
'--push_to_hub', default=None, type=str, help='Where to upload the converted model on the 🤗 hub.'
)
parser.add_argument(
'--device', default='cpu', type=str, help='Torch device to run the conversion, either cpu or cuda.'
)
UpperCamelCase = parser.parse_args()
    convert_musicgen_checkpoint(args.checkpoint, args.pytorch_dump_folder, args.push_to_hub, args.device)
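# Example invocation of this conversion script (the script filename and output
# path are illustrative):
#   python convert_musicgen.py --checkpoint small --pytorch_dump_folder ./musicgen-small
# Add --push_to_hub <repo_id> to upload the converted weights, and --device cuda
# to run the conversion on GPU.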
| 269 | 1 |
'''simple docstring'''
from __future__ import annotations
from collections.abc import Callable
from typing import Generic, TypeVar
lowerCAmelCase : str = TypeVar("""T""")
lowerCAmelCase : List[Any] = TypeVar("""U""")
class UpperCamelCase__ ( Generic[T, U] ):
"""simple docstring"""
def __init__( self , snake_case__ , snake_case__ ):
'''simple docstring'''
_lowerCAmelCase : Union[str, Any] = key
_lowerCAmelCase : str = val
_lowerCAmelCase : DoubleLinkedListNode[T, U] | None = None
_lowerCAmelCase : DoubleLinkedListNode[T, U] | None = None
def __repr__( self ):
'''simple docstring'''
return (
F'Node: key: {self.key}, val: {self.val}, '
F'has next: {bool(self.next )}, has prev: {bool(self.prev )}'
)
class UpperCamelCase__ ( Generic[T, U] ):
"""simple docstring"""
def __init__( self ):
'''simple docstring'''
_lowerCAmelCase : DoubleLinkedListNode[T, U] = DoubleLinkedListNode(snake_case__ , snake_case__ )
_lowerCAmelCase : DoubleLinkedListNode[T, U] = DoubleLinkedListNode(snake_case__ , snake_case__ )
_lowerCAmelCase , _lowerCAmelCase : Dict = self.rear, self.head
def __repr__( self ):
'''simple docstring'''
_lowerCAmelCase : Union[str, Any] = ['DoubleLinkedList']
_lowerCAmelCase : Tuple = self.head
while node.next is not None:
rep.append(str(snake_case__ ) )
_lowerCAmelCase : Tuple = node.next
rep.append(str(self.rear ) )
return ",\n ".join(snake_case__ )
def a ( self , snake_case__ ):
'''simple docstring'''
_lowerCAmelCase : Tuple = self.rear.prev
# All nodes other than self.head are guaranteed to have non-None previous
assert previous is not None
_lowerCAmelCase : str = node
_lowerCAmelCase : Optional[int] = previous
_lowerCAmelCase : int = node
_lowerCAmelCase : Optional[int] = self.rear
def a ( self , snake_case__ ):
'''simple docstring'''
if node.prev is None or node.next is None:
return None
_lowerCAmelCase : Tuple = node.next
_lowerCAmelCase : Tuple = node.prev
_lowerCAmelCase : Tuple = None
_lowerCAmelCase : int = None
return node
class UpperCamelCase__ ( Generic[T, U] ):
"""simple docstring"""
__magic_name__ = {}
def __init__( self , snake_case__ ):
'''simple docstring'''
_lowerCAmelCase : DoubleLinkedList[T, U] = DoubleLinkedList()
_lowerCAmelCase : Any = capacity
_lowerCAmelCase : str = 0
_lowerCAmelCase : Optional[int] = 0
_lowerCAmelCase : Dict = 0
_lowerCAmelCase : dict[T, DoubleLinkedListNode[T, U]] = {}
def __repr__( self ):
'''simple docstring'''
return (
F'CacheInfo(hits={self.hits}, misses={self.miss}, '
F'capacity={self.capacity}, current size={self.num_keys})'
)
def __contains__( self , snake_case__ ):
'''simple docstring'''
return key in self.cache
def a ( self , snake_case__ ):
'''simple docstring'''
if key in self.cache:
self.hits += 1
_lowerCAmelCase : DoubleLinkedListNode[T, U] = self.cache[key]
_lowerCAmelCase : Optional[int] = self.list.remove(self.cache[key] )
assert node == value_node
# node is guaranteed not None because it is in self.cache
assert node is not None
self.list.add(snake_case__ )
return node.val
self.miss += 1
return None
def a ( self , snake_case__ , snake_case__ ):
'''simple docstring'''
if key not in self.cache:
if self.num_keys >= self.capacity:
# delete first node (oldest) when over capacity
_lowerCAmelCase : Optional[int] = self.list.head.next
# guaranteed to have a non-None first node when num_keys > 0
# explain to type checker via assertions
assert first_node is not None
assert first_node.key is not None
assert (
self.list.remove(snake_case__ ) is not None
                ) # node guaranteed to be in list
del self.cache[first_node.key]
self.num_keys -= 1
_lowerCAmelCase : Optional[int] = DoubleLinkedListNode(snake_case__ , snake_case__ )
self.list.add(self.cache[key] )
self.num_keys += 1
else:
# bump node to the end of the list, update value
_lowerCAmelCase : Tuple = self.list.remove(self.cache[key] )
assert node is not None # node guaranteed to be in list
_lowerCAmelCase : Union[str, Any] = value
self.list.add(snake_case__ )
@classmethod
def a ( cls , snake_case__ = 128 ):
'''simple docstring'''
def cache_decorator_inner(snake_case__ ) -> Callable[..., U]:
def cache_decorator_wrapper(*snake_case__ ) -> U:
if func not in cls.decorator_function_to_instance_map:
_lowerCAmelCase : Optional[int] = LRUCache(snake_case__ )
_lowerCAmelCase : List[str] = cls.decorator_function_to_instance_map[func].get(args[0] )
if result is None:
_lowerCAmelCase : Union[str, Any] = func(*snake_case__ )
cls.decorator_function_to_instance_map[func].put(args[0] , snake_case__ )
return result
def cache_info() -> LRUCache[T, U]:
return cls.decorator_function_to_instance_map[func]
setattr(snake_case__ , 'cache_info' , snake_case__ ) # noqa: B010
return cache_decorator_wrapper
return cache_decorator_inner
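# For comparison, a minimal sketch of the same memoisation pattern using the
# standard library: functools.lru_cache tracks hits/misses/maxsize just like
# the CacheInfo counters maintained by the class above.
from functools import lru_cache

@lru_cache(maxsize=128)
def _fib(n: int) -> int:
    return n if n < 2 else _fib(n - 1) + _fib(n - 2)

_fib(20)
print(_fib.cache_info())  # CacheInfo(hits=18, misses=21, maxsize=128, currsize=21)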
if __name__ == "__main__":
import doctest
doctest.testmod()
| 630 |
'''simple docstring'''
import torch
from diffusers import DDPMScheduler
from .test_schedulers import SchedulerCommonTest
class UpperCamelCase__ ( SCREAMING_SNAKE_CASE_ ):
"""simple docstring"""
__magic_name__ = (DDPMScheduler,)
def a ( self , **snake_case__ ):
'''simple docstring'''
_lowerCAmelCase : Union[str, Any] = {
'num_train_timesteps': 1000,
'beta_start': 0.0001,
'beta_end': 0.02,
'beta_schedule': 'linear',
'variance_type': 'fixed_small',
'clip_sample': True,
}
config.update(**snake_case__ )
return config
def a ( self ):
'''simple docstring'''
for timesteps in [1, 5, 100, 1000]:
self.check_over_configs(num_train_timesteps=snake_case__ )
def a ( self ):
'''simple docstring'''
for beta_start, beta_end in zip([0.0001, 0.001, 0.01, 0.1] , [0.002, 0.02, 0.2, 2] ):
self.check_over_configs(beta_start=snake_case__ , beta_end=snake_case__ )
def a ( self ):
'''simple docstring'''
for schedule in ["linear", "squaredcos_cap_v2"]:
self.check_over_configs(beta_schedule=snake_case__ )
def a ( self ):
'''simple docstring'''
for variance in ["fixed_small", "fixed_large", "other"]:
self.check_over_configs(variance_type=snake_case__ )
def a ( self ):
'''simple docstring'''
for clip_sample in [True, False]:
self.check_over_configs(clip_sample=snake_case__ )
def a ( self ):
'''simple docstring'''
self.check_over_configs(thresholding=snake_case__ )
for threshold in [0.5, 1.0, 2.0]:
for prediction_type in ["epsilon", "sample", "v_prediction"]:
self.check_over_configs(
thresholding=snake_case__ , prediction_type=snake_case__ , sample_max_value=snake_case__ , )
def a ( self ):
'''simple docstring'''
for prediction_type in ["epsilon", "sample", "v_prediction"]:
self.check_over_configs(prediction_type=snake_case__ )
def a ( self ):
'''simple docstring'''
for t in [0, 500, 999]:
self.check_over_forward(time_step=snake_case__ )
def a ( self ):
'''simple docstring'''
_lowerCAmelCase : List[Any] = self.scheduler_classes[0]
_lowerCAmelCase : Optional[Any] = self.get_scheduler_config()
_lowerCAmelCase : List[Any] = scheduler_class(**snake_case__ )
assert torch.sum(torch.abs(scheduler._get_variance(0 ) - 0.0 ) ) < 1E-5
assert torch.sum(torch.abs(scheduler._get_variance(487 ) - 0.0_0979 ) ) < 1E-5
assert torch.sum(torch.abs(scheduler._get_variance(999 ) - 0.02 ) ) < 1E-5
def a ( self ):
'''simple docstring'''
_lowerCAmelCase : Optional[Any] = self.scheduler_classes[0]
_lowerCAmelCase : Optional[Any] = self.get_scheduler_config()
_lowerCAmelCase : List[Any] = scheduler_class(**snake_case__ )
_lowerCAmelCase : Optional[int] = len(snake_case__ )
_lowerCAmelCase : str = self.dummy_model()
_lowerCAmelCase : str = self.dummy_sample_deter
_lowerCAmelCase : Union[str, Any] = torch.manual_seed(0 )
for t in reversed(range(snake_case__ ) ):
# 1. predict noise residual
_lowerCAmelCase : List[Any] = model(snake_case__ , snake_case__ )
# 2. predict previous mean of sample x_t-1
_lowerCAmelCase : Any = scheduler.step(snake_case__ , snake_case__ , snake_case__ , generator=snake_case__ ).prev_sample
# if t > 0:
# noise = self.dummy_sample_deter
# variance = scheduler.get_variance(t) ** (0.5) * noise
#
# sample = pred_prev_sample + variance
_lowerCAmelCase : Dict = pred_prev_sample
_lowerCAmelCase : Dict = torch.sum(torch.abs(snake_case__ ) )
_lowerCAmelCase : List[str] = torch.mean(torch.abs(snake_case__ ) )
assert abs(result_sum.item() - 258.9606 ) < 1E-2
assert abs(result_mean.item() - 0.3372 ) < 1E-3
def a ( self ):
'''simple docstring'''
_lowerCAmelCase : int = self.scheduler_classes[0]
_lowerCAmelCase : Any = self.get_scheduler_config(prediction_type='v_prediction' )
_lowerCAmelCase : Union[str, Any] = scheduler_class(**snake_case__ )
_lowerCAmelCase : Optional[int] = len(snake_case__ )
_lowerCAmelCase : Any = self.dummy_model()
_lowerCAmelCase : Tuple = self.dummy_sample_deter
_lowerCAmelCase : Optional[int] = torch.manual_seed(0 )
for t in reversed(range(snake_case__ ) ):
# 1. predict noise residual
_lowerCAmelCase : Union[str, Any] = model(snake_case__ , snake_case__ )
# 2. predict previous mean of sample x_t-1
_lowerCAmelCase : Dict = scheduler.step(snake_case__ , snake_case__ , snake_case__ , generator=snake_case__ ).prev_sample
# if t > 0:
# noise = self.dummy_sample_deter
# variance = scheduler.get_variance(t) ** (0.5) * noise
#
# sample = pred_prev_sample + variance
_lowerCAmelCase : Tuple = pred_prev_sample
_lowerCAmelCase : Any = torch.sum(torch.abs(snake_case__ ) )
_lowerCAmelCase : Dict = torch.mean(torch.abs(snake_case__ ) )
assert abs(result_sum.item() - 202.0296 ) < 1E-2
assert abs(result_mean.item() - 0.2631 ) < 1E-3
def a ( self ):
'''simple docstring'''
_lowerCAmelCase : List[Any] = self.scheduler_classes[0]
_lowerCAmelCase : Optional[int] = self.get_scheduler_config()
_lowerCAmelCase : Union[str, Any] = scheduler_class(**snake_case__ )
_lowerCAmelCase : Union[str, Any] = [100, 87, 50, 1, 0]
scheduler.set_timesteps(timesteps=snake_case__ )
_lowerCAmelCase : Union[str, Any] = scheduler.timesteps
for i, timestep in enumerate(snake_case__ ):
if i == len(snake_case__ ) - 1:
_lowerCAmelCase : str = -1
else:
_lowerCAmelCase : Optional[Any] = timesteps[i + 1]
_lowerCAmelCase : int = scheduler.previous_timestep(snake_case__ )
_lowerCAmelCase : int = prev_t.item()
self.assertEqual(snake_case__ , snake_case__ )
def a ( self ):
'''simple docstring'''
_lowerCAmelCase : Optional[int] = self.scheduler_classes[0]
_lowerCAmelCase : Tuple = self.get_scheduler_config()
_lowerCAmelCase : List[Any] = scheduler_class(**snake_case__ )
_lowerCAmelCase : Optional[int] = [100, 87, 50, 51, 0]
with self.assertRaises(snake_case__ , msg='`custom_timesteps` must be in descending order.' ):
scheduler.set_timesteps(timesteps=snake_case__ )
def a ( self ):
'''simple docstring'''
_lowerCAmelCase : Optional[int] = self.scheduler_classes[0]
_lowerCAmelCase : List[str] = self.get_scheduler_config()
_lowerCAmelCase : Union[str, Any] = scheduler_class(**snake_case__ )
_lowerCAmelCase : Optional[int] = [100, 87, 50, 1, 0]
_lowerCAmelCase : int = len(snake_case__ )
with self.assertRaises(snake_case__ , msg='Can only pass one of `num_inference_steps` or `custom_timesteps`.' ):
scheduler.set_timesteps(num_inference_steps=snake_case__ , timesteps=snake_case__ )
def a ( self ):
'''simple docstring'''
_lowerCAmelCase : Optional[int] = self.scheduler_classes[0]
_lowerCAmelCase : int = self.get_scheduler_config()
_lowerCAmelCase : Any = scheduler_class(**snake_case__ )
_lowerCAmelCase : Any = [scheduler.config.num_train_timesteps]
with self.assertRaises(
            snake_case__ , msg=f'`timesteps` must start before `self.config.train_timesteps`: {scheduler.config.num_train_timesteps}' , ):
scheduler.set_timesteps(timesteps=snake_case__ )
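# A minimal, standalone sketch of the "fixed_small" variance checked in the
# tests above: beta_t * (1 - alpha_bar_{t-1}) / (1 - alpha_bar_t), with the
# linear beta schedule from the config (beta_start=0.0001, beta_end=0.02,
# 1000 training steps).
import torch as _torch

_betas = _torch.linspace(0.0001, 0.02, 1000)
_alphas_cumprod = _torch.cumprod(1.0 - _betas, dim=0)

def _posterior_variance(t: int) -> float:
    alpha_bar_prev = _alphas_cumprod[t - 1] if t > 0 else _torch.tensor(1.0)
    return (_betas[t] * (1.0 - alpha_bar_prev) / (1.0 - _alphas_cumprod[t])).item()

print(_posterior_variance(0), _posterior_variance(487), _posterior_variance(999))
# ≈ 0.0, 0.00979, 0.02 — the values asserted in the variance test above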
| 630 | 1 |
import copy
from typing import Dict, List, Optional
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
_snake_case = {
'facebook/mask2former-swin-small-coco-instance': (
'https://huggingface.co/facebook/mask2former-swin-small-coco-instance/blob/main/config.json'
)
# See all Mask2Former models at https://huggingface.co/models?filter=mask2former
}
_snake_case = logging.get_logger(__name__)
class lowerCAmelCase_ ( _lowercase ):
"""simple docstring"""
UpperCAmelCase__ = "mask2former"
UpperCAmelCase__ = ["swin"]
UpperCAmelCase__ = {"hidden_size": "hidden_dim"}
def __init__( self , _SCREAMING_SNAKE_CASE = None , _SCREAMING_SNAKE_CASE = 256 , _SCREAMING_SNAKE_CASE = 256 , _SCREAMING_SNAKE_CASE = 256 , _SCREAMING_SNAKE_CASE = 1_024 , _SCREAMING_SNAKE_CASE = "relu" , _SCREAMING_SNAKE_CASE = 6 , _SCREAMING_SNAKE_CASE = 10 , _SCREAMING_SNAKE_CASE = 8 , _SCREAMING_SNAKE_CASE = 0.0 , _SCREAMING_SNAKE_CASE = 2_048 , _SCREAMING_SNAKE_CASE = False , _SCREAMING_SNAKE_CASE = False , _SCREAMING_SNAKE_CASE = 4 , _SCREAMING_SNAKE_CASE = 255 , _SCREAMING_SNAKE_CASE = 100 , _SCREAMING_SNAKE_CASE = 0.1 , _SCREAMING_SNAKE_CASE = 2.0 , _SCREAMING_SNAKE_CASE = 5.0 , _SCREAMING_SNAKE_CASE = 5.0 , _SCREAMING_SNAKE_CASE = 12_544 , _SCREAMING_SNAKE_CASE = 3.0 , _SCREAMING_SNAKE_CASE = 0.7_5 , _SCREAMING_SNAKE_CASE = 0.0_2 , _SCREAMING_SNAKE_CASE = 1.0 , _SCREAMING_SNAKE_CASE = True , _SCREAMING_SNAKE_CASE = [4, 8, 16, 32] , _SCREAMING_SNAKE_CASE = None , **_SCREAMING_SNAKE_CASE , ) -> Dict:
if backbone_config is None:
logger.info('`backbone_config` is `None`. Initializing the config with the default `Swin` backbone.' )
__UpperCamelCase = CONFIG_MAPPING['swin'](
image_size=224 , in_channels=3 , patch_size=4 , embed_dim=96 , depths=[2, 2, 18, 2] , num_heads=[3, 6, 12, 24] , window_size=7 , drop_path_rate=0.3 , use_absolute_embeddings=_SCREAMING_SNAKE_CASE , out_features=['stage1', 'stage2', 'stage3', 'stage4'] , )
if isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
__UpperCamelCase = backbone_config.pop('model_type' )
__UpperCamelCase = CONFIG_MAPPING[backbone_model_type]
__UpperCamelCase = config_class.from_dict(_SCREAMING_SNAKE_CASE )
# verify that the backbone is supported
if backbone_config.model_type not in self.backbones_supported:
logger.warning_once(
f"""Backbone {backbone_config.model_type} is not a supported model and may not be compatible with Mask2Former. """
f"""Supported model types: {','.join(self.backbones_supported )}""" )
__UpperCamelCase = backbone_config
__UpperCamelCase = feature_size
__UpperCamelCase = mask_feature_size
__UpperCamelCase = hidden_dim
__UpperCamelCase = encoder_feedforward_dim
__UpperCamelCase = activation_function
__UpperCamelCase = encoder_layers
__UpperCamelCase = decoder_layers
__UpperCamelCase = num_attention_heads
__UpperCamelCase = dropout
__UpperCamelCase = dim_feedforward
__UpperCamelCase = pre_norm
__UpperCamelCase = enforce_input_projection
__UpperCamelCase = common_stride
__UpperCamelCase = ignore_value
__UpperCamelCase = num_queries
__UpperCamelCase = no_object_weight
__UpperCamelCase = class_weight
__UpperCamelCase = mask_weight
__UpperCamelCase = dice_weight
__UpperCamelCase = train_num_points
__UpperCamelCase = oversample_ratio
__UpperCamelCase = importance_sample_ratio
__UpperCamelCase = init_std
__UpperCamelCase = init_xavier_std
__UpperCamelCase = use_auxiliary_loss
__UpperCamelCase = feature_strides
__UpperCamelCase = output_auxiliary_logits
__UpperCamelCase = decoder_layers
super().__init__(**_SCREAMING_SNAKE_CASE )
@classmethod
def __lowercase( cls , _SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ) -> List[str]:
return cls(
backbone_config=_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE , )
def __lowercase( self ) -> Dict[str, any]:
__UpperCamelCase = copy.deepcopy(self.__dict__ )
__UpperCamelCase = self.backbone_config.to_dict()
__UpperCamelCase = self.__class__.model_type
return output
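# A minimal usage sketch of the config above, written with the library's public
# class names (Mask2FormerConfig / SwinConfig) rather than the placeholder names
# in this file, and using the classmethod mirrored above; assumes a working
# `transformers` install.
from transformers import Mask2FormerConfig, SwinConfig

_backbone = SwinConfig(depths=[2, 2, 18, 2], num_heads=[3, 6, 12, 24])
_config = Mask2FormerConfig.from_backbone_config(_backbone)
assert _config.backbone_config.model_type == "swin"
print(_config.to_dict()["backbone_config"]["model_type"])  # "swin"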
| 383 |
_snake_case = 8.3144598
def _a ( __lowercase , __lowercase ) -> float:
"""simple docstring"""
if temperature < 0:
raise Exception('Temperature cannot be less than 0 K' )
if molar_mass <= 0:
raise Exception('Molar mass cannot be less than or equal to 0 kg/mol' )
else:
return (3 * UNIVERSAL_GAS_CONSTANT * temperature / molar_mass) ** 0.5
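# Worked check of the formula above: for N2 at T = 300 K with M = 0.028 kg/mol,
# v_rms = sqrt(3 * 8.3144598 * 300 / 0.028) ≈ 517 m/s.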
if __name__ == "__main__":
import doctest
# run doctest
doctest.testmod()
# example
_snake_case = 300
    _snake_case = 0.028  # molar mass of N2 in kg/mol (the formula expects kg/mol, not g/mol)
_snake_case = rms_speed_of_molecule(temperature, molar_mass)
print(F'''Vrms of Nitrogen gas at 300 K is {vrms} m/s''')
| 383 | 1 |
'''simple docstring'''
import numpy
class lowercase :
def __init__( self , _snake_case , _snake_case) -> None:
UpperCAmelCase_ : Optional[Any] = input_array
# Random initial weights are assigned where first argument is the
# number of nodes in previous layer and second argument is the
# number of nodes in the next layer.
# Random initial weights are assigned.
# self.input_array.shape[1] is used to represent number of nodes in input layer.
# First hidden layer consists of 4 nodes.
UpperCAmelCase_ : Tuple = numpy.random.rand(
self.input_array.shape[1] , 4)
# Random initial values for the first hidden layer.
# First hidden layer has 4 nodes.
# Second hidden layer has 3 nodes.
UpperCAmelCase_ : List[str] = numpy.random.rand(
4 , 3)
# Random initial values for the second hidden layer.
# Second hidden layer has 3 nodes.
# Output layer has 1 node.
UpperCAmelCase_ : Dict = numpy.random.rand(3 , 1)
# Real output values provided.
UpperCAmelCase_ : str = output_array
# Predicted output values by the neural network.
# Predicted_output array initially consists of zeroes.
UpperCAmelCase_ : Union[str, Any] = numpy.zeros(output_array.shape)
def _snake_case ( self) -> numpy.ndarray:
UpperCAmelCase_ : Any = sigmoid(
numpy.dot(self.input_array , self.input_layer_and_first_hidden_layer_weights))
# layer_between_first_hidden_layer_and_second_hidden_layer is the layer
# connecting the first hidden set of nodes with the second hidden set of nodes.
UpperCAmelCase_ : Tuple = sigmoid(
numpy.dot(
self.layer_between_input_and_first_hidden_layer , self.first_hidden_layer_and_second_hidden_layer_weights , ))
# layer_between_second_hidden_layer_and_output is the layer connecting
# second hidden layer with the output node.
UpperCAmelCase_ : int = sigmoid(
numpy.dot(
self.layer_between_first_hidden_layer_and_second_hidden_layer , self.second_hidden_layer_and_output_layer_weights , ))
return self.layer_between_second_hidden_layer_and_output
def _snake_case ( self) -> None:
UpperCAmelCase_ : Optional[int] = numpy.dot(
self.layer_between_first_hidden_layer_and_second_hidden_layer.T , 2
* (self.output_array - self.predicted_output)
* sigmoid_derivative(self.predicted_output) , )
UpperCAmelCase_ : Any = numpy.dot(
self.layer_between_input_and_first_hidden_layer.T , numpy.dot(
2
* (self.output_array - self.predicted_output)
* sigmoid_derivative(self.predicted_output) , self.second_hidden_layer_and_output_layer_weights.T , )
* sigmoid_derivative(
self.layer_between_first_hidden_layer_and_second_hidden_layer) , )
UpperCAmelCase_ : Tuple = numpy.dot(
self.input_array.T , numpy.dot(
numpy.dot(
2
* (self.output_array - self.predicted_output)
* sigmoid_derivative(self.predicted_output) , self.second_hidden_layer_and_output_layer_weights.T , )
* sigmoid_derivative(
self.layer_between_first_hidden_layer_and_second_hidden_layer) , self.first_hidden_layer_and_second_hidden_layer_weights.T , )
* sigmoid_derivative(self.layer_between_input_and_first_hidden_layer) , )
self.input_layer_and_first_hidden_layer_weights += (
updated_input_layer_and_first_hidden_layer_weights
)
self.first_hidden_layer_and_second_hidden_layer_weights += (
updated_first_hidden_layer_and_second_hidden_layer_weights
)
self.second_hidden_layer_and_output_layer_weights += (
updated_second_hidden_layer_and_output_layer_weights
)
def _snake_case ( self , _snake_case , _snake_case , _snake_case) -> None:
for iteration in range(1 , iterations + 1):
UpperCAmelCase_ : int = self.feedforward()
self.back_propagation()
if give_loss:
UpperCAmelCase_ : List[Any] = numpy.mean(numpy.square(output - self.feedforward()))
print(F"""Iteration {iteration} Loss: {loss}""")
def _snake_case ( self , _snake_case) -> int:
UpperCAmelCase_ : Optional[int] = input_arr
UpperCAmelCase_ : Tuple = sigmoid(
numpy.dot(self.array , self.input_layer_and_first_hidden_layer_weights))
UpperCAmelCase_ : Optional[int] = sigmoid(
numpy.dot(
self.layer_between_input_and_first_hidden_layer , self.first_hidden_layer_and_second_hidden_layer_weights , ))
UpperCAmelCase_ : int = sigmoid(
numpy.dot(
self.layer_between_first_hidden_layer_and_second_hidden_layer , self.second_hidden_layer_and_output_layer_weights , ))
return int(self.layer_between_second_hidden_layer_and_output > 0.6)
def SCREAMING_SNAKE_CASE( UpperCamelCase ) -> numpy.ndarray:
return 1 / (1 + numpy.exp(-value ))
def SCREAMING_SNAKE_CASE( UpperCamelCase ) -> numpy.ndarray:
return (value) * (1 - (value))
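# A quick numerical check of the derivative convention used above: the helper
# takes the *activation* s = sigmoid(x) and returns s * (1 - s), which should
# match a central finite-difference estimate of d(sigmoid)/dx.
_x, _eps = 0.3, 1e-6
_s = 1 / (1 + numpy.exp(-_x))
_analytic = _s * (1 - _s)
_numeric = (1 / (1 + numpy.exp(-(_x + _eps))) - 1 / (1 + numpy.exp(-(_x - _eps)))) / (2 * _eps)
assert abs(_analytic - _numeric) < 1e-8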
def SCREAMING_SNAKE_CASE( ) -> int:
UpperCAmelCase_ : Optional[int] = numpy.array(
(
[0, 0, 0],
[0, 0, 1],
[0, 1, 0],
[0, 1, 1],
[1, 0, 0],
[1, 0, 1],
[1, 1, 0],
[1, 1, 1],
) ,dtype=numpy.floataa ,)
# True output values for the given input values.
UpperCAmelCase_ : Dict = numpy.array(([0], [1], [1], [0], [1], [0], [0], [1]) ,dtype=numpy.floataa )
# Calling neural network class.
UpperCAmelCase_ : List[str] = TwoHiddenLayerNeuralNetwork(
input_array=UpperCamelCase ,output_array=UpperCamelCase )
# Calling training function.
# Set give_loss to True if you want to see loss in every iteration.
neural_network.train(output=UpperCamelCase ,iterations=1_0 ,give_loss=UpperCamelCase )
return neural_network.predict(numpy.array(([1, 1, 1]) ,dtype=numpy.floataa ) )
if __name__ == "__main__":
example()
| 719 |
'''simple docstring'''
import unittest
from parameterized import parameterized
from transformers import OpenLlamaConfig, is_torch_available, set_seed
from transformers.testing_utils import require_torch, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import OpenLlamaForCausalLM, OpenLlamaForSequenceClassification, OpenLlamaModel
class lowercase :
def __init__( self , _snake_case , _snake_case=13 , _snake_case=7 , _snake_case=True , _snake_case=True , _snake_case=False , _snake_case=True , _snake_case=99 , _snake_case=32 , _snake_case=5 , _snake_case=4 , _snake_case=37 , _snake_case="gelu" , _snake_case=0.1 , _snake_case=0.1 , _snake_case=512 , _snake_case=16 , _snake_case=2 , _snake_case=0.02 , _snake_case=3 , _snake_case=4 , _snake_case=None , ) -> str:
UpperCAmelCase_ : Dict = parent
UpperCAmelCase_ : Tuple = batch_size
UpperCAmelCase_ : Tuple = seq_length
UpperCAmelCase_ : Dict = is_training
UpperCAmelCase_ : Tuple = use_input_mask
UpperCAmelCase_ : List[Any] = use_token_type_ids
UpperCAmelCase_ : Optional[int] = use_labels
UpperCAmelCase_ : List[str] = vocab_size
UpperCAmelCase_ : int = hidden_size
UpperCAmelCase_ : Any = num_hidden_layers
UpperCAmelCase_ : Optional[int] = num_attention_heads
UpperCAmelCase_ : List[str] = intermediate_size
UpperCAmelCase_ : Union[str, Any] = hidden_act
UpperCAmelCase_ : Optional[int] = hidden_dropout_prob
UpperCAmelCase_ : Any = attention_probs_dropout_prob
UpperCAmelCase_ : Dict = max_position_embeddings
UpperCAmelCase_ : Union[str, Any] = type_vocab_size
UpperCAmelCase_ : int = type_sequence_label_size
UpperCAmelCase_ : List[str] = initializer_range
UpperCAmelCase_ : Any = num_labels
UpperCAmelCase_ : Optional[Any] = num_choices
UpperCAmelCase_ : str = scope
def _snake_case ( self) -> Tuple:
UpperCAmelCase_ : Any = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size)
UpperCAmelCase_ : Any = None
if self.use_input_mask:
UpperCAmelCase_ : Optional[Any] = random_attention_mask([self.batch_size, self.seq_length])
UpperCAmelCase_ : Union[str, Any] = None
if self.use_token_type_ids:
UpperCAmelCase_ : Tuple = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size)
UpperCAmelCase_ : Dict = None
UpperCAmelCase_ : List[str] = None
UpperCAmelCase_ : Optional[Any] = None
if self.use_labels:
UpperCAmelCase_ : Any = ids_tensor([self.batch_size] , self.type_sequence_label_size)
UpperCAmelCase_ : List[str] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels)
UpperCAmelCase_ : Tuple = ids_tensor([self.batch_size] , self.num_choices)
UpperCAmelCase_ : Tuple = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def _snake_case ( self) -> Optional[Any]:
return OpenLlamaConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=_snake_case , initializer_range=self.initializer_range , use_stable_embedding=_snake_case , )
def _snake_case ( self , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case) -> Union[str, Any]:
UpperCAmelCase_ : str = OpenLlamaModel(config=_snake_case)
model.to(_snake_case)
model.eval()
UpperCAmelCase_ : Dict = model(_snake_case , attention_mask=_snake_case)
UpperCAmelCase_ : List[str] = model(_snake_case)
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size))
def _snake_case ( self , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case , ) -> int:
UpperCAmelCase_ : Union[str, Any] = True
UpperCAmelCase_ : List[Any] = OpenLlamaModel(_snake_case)
model.to(_snake_case)
model.eval()
UpperCAmelCase_ : Optional[int] = model(
_snake_case , attention_mask=_snake_case , encoder_hidden_states=_snake_case , encoder_attention_mask=_snake_case , )
UpperCAmelCase_ : Any = model(
_snake_case , attention_mask=_snake_case , encoder_hidden_states=_snake_case , )
UpperCAmelCase_ : Optional[int] = model(_snake_case , attention_mask=_snake_case)
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size))
def _snake_case ( self , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case , ) -> Any:
UpperCAmelCase_ : Optional[int] = OpenLlamaForCausalLM(config=_snake_case)
model.to(_snake_case)
model.eval()
UpperCAmelCase_ : Optional[int] = model(_snake_case , attention_mask=_snake_case , labels=_snake_case)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size))
def _snake_case ( self , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case , ) -> Union[str, Any]:
UpperCAmelCase_ : Any = True
UpperCAmelCase_ : Optional[Any] = True
UpperCAmelCase_ : int = OpenLlamaForCausalLM(config=_snake_case)
model.to(_snake_case)
model.eval()
# first forward pass
UpperCAmelCase_ : List[str] = model(
_snake_case , attention_mask=_snake_case , encoder_hidden_states=_snake_case , encoder_attention_mask=_snake_case , use_cache=_snake_case , )
UpperCAmelCase_ : Dict = outputs.past_key_values
# create hypothetical multiple next token and extent to next_input_ids
UpperCAmelCase_ : str = ids_tensor((self.batch_size, 3) , config.vocab_size)
UpperCAmelCase_ : int = ids_tensor((self.batch_size, 3) , vocab_size=2)
# append to next input_ids and
UpperCAmelCase_ : Tuple = torch.cat([input_ids, next_tokens] , dim=-1)
UpperCAmelCase_ : List[str] = torch.cat([input_mask, next_mask] , dim=-1)
UpperCAmelCase_ : Optional[Any] = model(
_snake_case , attention_mask=_snake_case , encoder_hidden_states=_snake_case , encoder_attention_mask=_snake_case , output_hidden_states=_snake_case , )['hidden_states'][0]
UpperCAmelCase_ : List[str] = model(
_snake_case , attention_mask=_snake_case , encoder_hidden_states=_snake_case , encoder_attention_mask=_snake_case , past_key_values=_snake_case , output_hidden_states=_snake_case , )['hidden_states'][0]
# select random slice
UpperCAmelCase_ : Optional[Any] = ids_tensor((1,) , output_from_past.shape[-1]).item()
UpperCAmelCase_ : Optional[int] = output_from_no_past[:, -3:, random_slice_idx].detach()
UpperCAmelCase_ : Union[str, Any] = output_from_past[:, :, random_slice_idx].detach()
self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1])
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(_snake_case , _snake_case , atol=1e-3))
def _snake_case ( self) -> List[Any]:
UpperCAmelCase_ : Optional[Any] = self.prepare_config_and_inputs()
        UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : Union[str, Any] = config_and_inputs
UpperCAmelCase_ : List[str] = {'input_ids': input_ids, 'attention_mask': input_mask}
return config, inputs_dict
@require_torch
class lowercase ( a_, a_, a_, unittest.TestCase ):
_lowerCamelCase : Dict= (
(OpenLlamaModel, OpenLlamaForCausalLM, OpenLlamaForSequenceClassification) if is_torch_available() else ()
)
_lowerCamelCase : str= (OpenLlamaForCausalLM,) if is_torch_available() else ()
_lowerCamelCase : Optional[int]= (
{
"feature-extraction": OpenLlamaModel,
"text-classification": OpenLlamaForSequenceClassification,
"text-generation": OpenLlamaForCausalLM,
"zero-shot": OpenLlamaForSequenceClassification,
}
if is_torch_available()
else {}
)
_lowerCamelCase : Tuple= False
_lowerCamelCase : int= False
def _snake_case ( self) -> int:
UpperCAmelCase_ : Any = OpenLlamaModelTester(self)
UpperCAmelCase_ : Union[str, Any] = ConfigTester(self , config_class=_snake_case , hidden_size=37)
def _snake_case ( self) -> Optional[Any]:
self.config_tester.run_common_tests()
def _snake_case ( self) -> str:
UpperCAmelCase_ : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_snake_case)
def _snake_case ( self) -> Union[str, Any]:
UpperCAmelCase_ : str = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
UpperCAmelCase_ : List[Any] = type
self.model_tester.create_and_check_model(*_snake_case)
def _snake_case ( self) -> Union[str, Any]:
UpperCAmelCase_ , UpperCAmelCase_ : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
UpperCAmelCase_ : List[str] = 3
UpperCAmelCase_ : List[str] = input_dict['input_ids']
UpperCAmelCase_ : Optional[Any] = input_ids.ne(1).to(_snake_case)
UpperCAmelCase_ : Any = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size)
UpperCAmelCase_ : List[Any] = OpenLlamaForSequenceClassification(_snake_case)
model.to(_snake_case)
model.eval()
UpperCAmelCase_ : Tuple = model(_snake_case , attention_mask=_snake_case , labels=_snake_case)
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels))
def _snake_case ( self) -> Any:
UpperCAmelCase_ , UpperCAmelCase_ : Dict = self.model_tester.prepare_config_and_inputs_for_common()
UpperCAmelCase_ : Optional[Any] = 3
UpperCAmelCase_ : int = 'single_label_classification'
UpperCAmelCase_ : int = input_dict['input_ids']
UpperCAmelCase_ : Any = input_ids.ne(1).to(_snake_case)
UpperCAmelCase_ : Any = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size)
UpperCAmelCase_ : int = OpenLlamaForSequenceClassification(_snake_case)
model.to(_snake_case)
model.eval()
UpperCAmelCase_ : List[str] = model(_snake_case , attention_mask=_snake_case , labels=_snake_case)
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels))
def _snake_case ( self) -> Optional[int]:
UpperCAmelCase_ , UpperCAmelCase_ : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
UpperCAmelCase_ : int = 3
UpperCAmelCase_ : Any = 'multi_label_classification'
UpperCAmelCase_ : Tuple = input_dict['input_ids']
UpperCAmelCase_ : str = input_ids.ne(1).to(_snake_case)
UpperCAmelCase_ : Any = ids_tensor(
[self.model_tester.batch_size, config.num_labels] , self.model_tester.type_sequence_label_size).to(torch.float)
UpperCAmelCase_ : Optional[Any] = OpenLlamaForSequenceClassification(_snake_case)
model.to(_snake_case)
model.eval()
UpperCAmelCase_ : List[Any] = model(_snake_case , attention_mask=_snake_case , labels=_snake_case)
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels))
@unittest.skip('Open-Llama buffers include complex numbers, which breaks this test')
def _snake_case ( self) -> Optional[int]:
pass
@parameterized.expand([('linear',), ('dynamic',)])
def _snake_case ( self , _snake_case) -> List[str]:
UpperCAmelCase_ , UpperCAmelCase_ : Any = self.model_tester.prepare_config_and_inputs_for_common()
UpperCAmelCase_ : List[Any] = ids_tensor([1, 10] , config.vocab_size)
UpperCAmelCase_ : List[str] = ids_tensor([1, int(config.max_position_embeddings * 1.5)] , config.vocab_size)
set_seed(42) # Fixed seed at init time so the two models get the same random weights
UpperCAmelCase_ : int = OpenLlamaModel(_snake_case)
original_model.to(_snake_case)
original_model.eval()
UpperCAmelCase_ : Optional[Any] = original_model(_snake_case).last_hidden_state
UpperCAmelCase_ : Tuple = original_model(_snake_case).last_hidden_state
set_seed(42) # Fixed seed at init time so the two models get the same random weights
UpperCAmelCase_ : Dict = {'type': scaling_type, 'factor': 10.0}
UpperCAmelCase_ : Union[str, Any] = OpenLlamaModel(_snake_case)
scaled_model.to(_snake_case)
scaled_model.eval()
UpperCAmelCase_ : Dict = scaled_model(_snake_case).last_hidden_state
UpperCAmelCase_ : Optional[int] = scaled_model(_snake_case).last_hidden_state
# Dynamic scaling does not change the RoPE embeddings until it receives an input longer than the original
# maximum sequence length, so the outputs for the short input should match.
if scaling_type == "dynamic":
self.assertTrue(torch.allclose(_snake_case , _snake_case , atol=1e-5))
else:
self.assertFalse(torch.allclose(_snake_case , _snake_case , atol=1e-5))
# The output should be different for long inputs
self.assertFalse(torch.allclose(_snake_case , _snake_case , atol=1e-5))
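# A standalone sketch of the cache-equivalence property exercised by the
# past-key-values test above: feeding only the suffix tokens together with the
# cached past must reproduce the no-cache hidden states at the new positions.
# The checkpoint (hf-internal-testing/tiny-random-gpt2) is a small public test
# model chosen for illustration; any tiny causal LM would do.
import torch as _torch
from transformers import AutoModelForCausalLM

_tiny = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2").eval()
_prefix = _torch.randint(0, _tiny.config.vocab_size, (1, 8))
_suffix = _torch.randint(0, _tiny.config.vocab_size, (1, 3))
with _torch.no_grad():
    _full = _tiny(_torch.cat([_prefix, _suffix], dim=-1), output_hidden_states=True)
    _past = _tiny(_prefix, use_cache=True).past_key_values
    _step = _tiny(_suffix, past_key_values=_past, output_hidden_states=True)
assert _torch.allclose(_full.hidden_states[-1][:, -3:], _step.hidden_states[-1], atol=1e-3)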
| 471 | 0 |
from __future__ import annotations
def A ( _lowercase , _lowercase ):
    # Two-pointer scan: assumes `nums` is sorted in non-decreasing order.
SCREAMING_SNAKE_CASE : Any = 0
SCREAMING_SNAKE_CASE : Tuple = len(_lowercase ) - 1
while i < j:
if nums[i] + nums[j] == target:
return [i, j]
elif nums[i] + nums[j] < target:
SCREAMING_SNAKE_CASE : Optional[Any] = i + 1
else:
SCREAMING_SNAKE_CASE : Optional[int] = j - 1
return []
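# For unsorted input, a hash-map pass is the usual alternative to the
# two-pointer scan above (which relies on the list being sorted); a minimal
# standalone sketch:
def two_sum_hash(nums: list[int], target: int) -> list[int]:
    seen: dict[int, int] = {}  # value -> index of its first occurrence
    for i, value in enumerate(nums):
        if target - value in seen:
            return [seen[target - value], i]
        seen[value] = i
    return []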
if __name__ == "__main__":
import doctest
doctest.testmod()
print(f"""{two_pointer([2, 7, 11, 15], 9) = }""")
| 248 |
def A ( _lowercase , _lowercase ):
return int((input_a, input_a).count(0 ) == 0 )
def A ( ):
assert and_gate(0 , 0 ) == 0
assert and_gate(0 , 1 ) == 0
assert and_gate(1 , 0 ) == 0
assert and_gate(1 , 1 ) == 1
if __name__ == "__main__":
test_and_gate()
print(and_gate(1, 0))
print(and_gate(0, 0))
print(and_gate(0, 1))
print(and_gate(1, 1))
| 248 | 1 |
import argparse
import pathlib
import fairseq
import torch
from fairseq.models.roberta import RobertaModel as FairseqRobertaModel
from fairseq.modules import TransformerSentenceEncoderLayer
from packaging import version
from transformers import XLMRobertaConfig, XLMRobertaXLForMaskedLM, XLMRobertaXLForSequenceClassification
from transformers.models.bert.modeling_bert import (
BertIntermediate,
BertLayer,
BertOutput,
BertSelfAttention,
BertSelfOutput,
)
from transformers.models.roberta.modeling_roberta import RobertaAttention
from transformers.utils import logging
if version.parse(fairseq.__version__) < version.parse("1.0.0a"):
raise Exception("requires fairseq >= 1.0.0a")
logging.set_verbosity_info()
_A = logging.get_logger(__name__)
_A = "Hello world! cécé herlolip"
def lowercase_ ( A__ , A__ , A__ ) -> Optional[int]:
"""simple docstring"""
snake_case = FairseqRobertaModel.from_pretrained(A__ )
roberta.eval() # disable dropout
snake_case = roberta.model.encoder.sentence_encoder
snake_case = XLMRobertaConfig(
vocab_size=roberta_sent_encoder.embed_tokens.num_embeddings , hidden_size=roberta.cfg.model.encoder_embed_dim , num_hidden_layers=roberta.cfg.model.encoder_layers , num_attention_heads=roberta.cfg.model.encoder_attention_heads , intermediate_size=roberta.cfg.model.encoder_ffn_embed_dim , max_position_embeddings=514 , type_vocab_size=1 , layer_norm_eps=1e-5 , )
if classification_head:
snake_case = roberta.model.classification_heads["mnli"].out_proj.weight.shape[0]
print("Our RoBERTa config:" , A__ )
snake_case = XLMRobertaXLForSequenceClassification(A__ ) if classification_head else XLMRobertaXLForMaskedLM(A__ )
model.eval()
# Now let's copy all the weights.
# Embeddings
snake_case = roberta_sent_encoder.embed_tokens.weight
snake_case = roberta_sent_encoder.embed_positions.weight
snake_case = torch.zeros_like(
model.roberta.embeddings.token_type_embeddings.weight ) # just zero them out b/c RoBERTa doesn't use them.
snake_case = roberta_sent_encoder.layer_norm.weight
snake_case = roberta_sent_encoder.layer_norm.bias
for i in range(config.num_hidden_layers ):
# Encoder: start of layer
snake_case = model.roberta.encoder.layer[i]
snake_case = roberta_sent_encoder.layers[i]
snake_case = layer.attention
snake_case = roberta_layer.self_attn_layer_norm.weight
snake_case = roberta_layer.self_attn_layer_norm.bias
# self attention
snake_case = layer.attention.self
assert (
roberta_layer.self_attn.k_proj.weight.data.shape
== roberta_layer.self_attn.q_proj.weight.data.shape
== roberta_layer.self_attn.v_proj.weight.data.shape
== torch.Size((config.hidden_size, config.hidden_size) )
)
snake_case = roberta_layer.self_attn.q_proj.weight
snake_case = roberta_layer.self_attn.q_proj.bias
snake_case = roberta_layer.self_attn.k_proj.weight
snake_case = roberta_layer.self_attn.k_proj.bias
snake_case = roberta_layer.self_attn.v_proj.weight
snake_case = roberta_layer.self_attn.v_proj.bias
# self-attention output
snake_case = layer.attention.output
assert self_output.dense.weight.shape == roberta_layer.self_attn.out_proj.weight.shape
snake_case = roberta_layer.self_attn.out_proj.weight
snake_case = roberta_layer.self_attn.out_proj.bias
# this one is final layer norm
snake_case = roberta_layer.final_layer_norm.weight
snake_case = roberta_layer.final_layer_norm.bias
# intermediate
snake_case = layer.intermediate
assert intermediate.dense.weight.shape == roberta_layer.fca.weight.shape
snake_case = roberta_layer.fca.weight
snake_case = roberta_layer.fca.bias
# output
snake_case = layer.output
assert bert_output.dense.weight.shape == roberta_layer.fca.weight.shape
snake_case = roberta_layer.fca.weight
snake_case = roberta_layer.fca.bias
# end of layer
if classification_head:
snake_case = roberta.model.classification_heads["mnli"].dense.weight
snake_case = roberta.model.classification_heads["mnli"].dense.bias
snake_case = roberta.model.classification_heads["mnli"].out_proj.weight
snake_case = roberta.model.classification_heads["mnli"].out_proj.bias
else:
# LM Head
snake_case = roberta.model.encoder.lm_head.dense.weight
snake_case = roberta.model.encoder.lm_head.dense.bias
snake_case = roberta.model.encoder.lm_head.layer_norm.weight
snake_case = roberta.model.encoder.lm_head.layer_norm.bias
snake_case = roberta.model.encoder.lm_head.weight
snake_case = roberta.model.encoder.lm_head.bias
# Let's check that we get the same results.
snake_case = roberta.encode(A__ ).unsqueeze(0 ) # batch of size 1
snake_case = model(A__ )[0]
if classification_head:
snake_case = roberta.model.classification_heads["mnli"](roberta.extract_features(A__ ) )
else:
snake_case = roberta.model(A__ )[0]
print(our_output.shape , their_output.shape )
snake_case = torch.max(torch.abs(our_output - their_output ) ).item()
print(F'max_absolute_diff = {max_absolute_diff}' ) # ~ 1e-7
snake_case = torch.allclose(A__ , A__ , atol=1e-3 )
print("Do both models output the same tensors?" , "🔥" if success else "💩" )
if not success:
raise Exception("Something went wRoNg" )
pathlib.Path(A__ ).mkdir(parents=A__ , exist_ok=A__ )
print(F'Saving model to {pytorch_dump_folder_path}' )
model.save_pretrained(A__ )
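# A minimal, standalone sketch of the copy-and-verify pattern used throughout
# the conversion above: copy parameters tensor-by-tensor, then confirm the two
# modules produce identical outputs on the same input.
from torch import nn as _nn

_src = _nn.Linear(4, 4)
_dst = _nn.Linear(4, 4)
_dst.weight.data.copy_(_src.weight.data)
_dst.bias.data.copy_(_src.bias.data)
_x = torch.randn(2, 4)
_max_absolute_diff = torch.max(torch.abs(_src(_x) - _dst(_x))).item()
assert torch.allclose(_src(_x), _dst(_x), atol=1e-6), f"max_absolute_diff = {_max_absolute_diff}"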
if __name__ == "__main__":
_A = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--roberta_checkpoint_path", default=None, type=str, required=True, help="Path the official PyTorch dump."
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
parser.add_argument(
"--classification_head", action="store_true", help="Whether to convert a final classification head."
)
_A = parser.parse_args()
convert_xlm_roberta_xl_checkpoint_to_pytorch(
args.roberta_checkpoint_path, args.pytorch_dump_folder_path, args.classification_head
)
| 709 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
_A = {
"configuration_wav2vec2": ["WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP", "Wav2Vec2Config"],
"feature_extraction_wav2vec2": ["Wav2Vec2FeatureExtractor"],
"processing_wav2vec2": ["Wav2Vec2Processor"],
"tokenization_wav2vec2": ["Wav2Vec2CTCTokenizer", "Wav2Vec2Tokenizer"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_A = [
"WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST",
"Wav2Vec2ForAudioFrameClassification",
"Wav2Vec2ForCTC",
"Wav2Vec2ForMaskedLM",
"Wav2Vec2ForPreTraining",
"Wav2Vec2ForSequenceClassification",
"Wav2Vec2ForXVector",
"Wav2Vec2Model",
"Wav2Vec2PreTrainedModel",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_A = [
"TF_WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFWav2Vec2ForCTC",
"TFWav2Vec2Model",
"TFWav2Vec2PreTrainedModel",
"TFWav2Vec2ForSequenceClassification",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_A = [
"FlaxWav2Vec2ForCTC",
"FlaxWav2Vec2ForPreTraining",
"FlaxWav2Vec2Model",
"FlaxWav2Vec2PreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_wavaveca import WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP, WavaVecaConfig
from .feature_extraction_wavaveca import WavaVecaFeatureExtractor
from .processing_wavaveca import WavaVecaProcessor
from .tokenization_wavaveca import WavaVecaCTCTokenizer, WavaVecaTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_wavaveca import (
WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST,
WavaVecaForAudioFrameClassification,
WavaVecaForCTC,
WavaVecaForMaskedLM,
WavaVecaForPreTraining,
WavaVecaForSequenceClassification,
WavaVecaForXVector,
WavaVecaModel,
WavaVecaPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_wavaveca import (
TF_WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST,
TFWavaVecaForCTC,
TFWavaVecaForSequenceClassification,
TFWavaVecaModel,
TFWavaVecaPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_flax_wavaveca import (
FlaxWavaVecaForCTC,
FlaxWavaVecaForPreTraining,
FlaxWavaVecaModel,
FlaxWavaVecaPreTrainedModel,
)
else:
import sys
_A = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
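# A minimal sketch of the availability-gating pattern above, written standalone:
# probe for a backend with importlib and extend the import structure only when
# the package is importable (the module/class names in the dict are hypothetical).
import importlib.util

def _backend_available(package: str) -> bool:
    return importlib.util.find_spec(package) is not None

_demo_import_structure = {"configuration_demo": ["DemoConfig"]}
if _backend_available("torch"):
    _demo_import_structure["modeling_demo"] = ["DemoModel"]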
| 294 | 0 |
def a__ ( A_ = 100 ):
'''simple docstring'''
__magic_name__ = set()
__magic_name__ = 0
    __magic_name__ = n + 1 # maximum limit
    for a in range(2, n + 1):
        for b in range(2, n + 1):
            __magic_name__ = a**b # calculates the current power
            collect_powers.add(current_pow ) # adds the result to the set
return len(A_ )
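# Sanity check from the Project Euler 29 statement, written standalone so it is
# independent of the placeholder names above: n = 5 yields 15 distinct terms,
# since 2**4 == 4**2 is only counted once.
assert len({a**b for a in range(2, 6) for b in range(2, 6)}) == 15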
if __name__ == "__main__":
print('Number of terms ', solution(int(str(input()).strip())))
| 529 |
import importlib
import json
import os
from collections import OrderedDict
from typing import Dict, Optional, Union
# Build the list of all feature extractors
from ...configuration_utils import PretrainedConfig
from ...dynamic_module_utils import get_class_from_dynamic_module, resolve_trust_remote_code
from ...feature_extraction_utils import FeatureExtractionMixin
from ...utils import CONFIG_NAME, FEATURE_EXTRACTOR_NAME, get_file_from_repo, logging
from .auto_factory import _LazyAutoMapping
from .configuration_auto import (
CONFIG_MAPPING_NAMES,
AutoConfig,
model_type_to_module_name,
replace_list_option_in_docstrings,
)
__lowerCAmelCase : List[str] = logging.get_logger(__name__)
__lowerCAmelCase : List[Any] = OrderedDict(
[
('audio-spectrogram-transformer', 'ASTFeatureExtractor'),
('beit', 'BeitFeatureExtractor'),
('chinese_clip', 'ChineseCLIPFeatureExtractor'),
('clap', 'ClapFeatureExtractor'),
('clip', 'CLIPFeatureExtractor'),
('clipseg', 'ViTFeatureExtractor'),
('conditional_detr', 'ConditionalDetrFeatureExtractor'),
('convnext', 'ConvNextFeatureExtractor'),
('cvt', 'ConvNextFeatureExtractor'),
('data2vec-audio', 'Wav2Vec2FeatureExtractor'),
('data2vec-vision', 'BeitFeatureExtractor'),
('deformable_detr', 'DeformableDetrFeatureExtractor'),
('deit', 'DeiTFeatureExtractor'),
('detr', 'DetrFeatureExtractor'),
('dinat', 'ViTFeatureExtractor'),
('donut-swin', 'DonutFeatureExtractor'),
('dpt', 'DPTFeatureExtractor'),
('encodec', 'EncodecFeatureExtractor'),
('flava', 'FlavaFeatureExtractor'),
('glpn', 'GLPNFeatureExtractor'),
('groupvit', 'CLIPFeatureExtractor'),
('hubert', 'Wav2Vec2FeatureExtractor'),
('imagegpt', 'ImageGPTFeatureExtractor'),
('layoutlmv2', 'LayoutLMv2FeatureExtractor'),
('layoutlmv3', 'LayoutLMv3FeatureExtractor'),
('levit', 'LevitFeatureExtractor'),
('maskformer', 'MaskFormerFeatureExtractor'),
('mctct', 'MCTCTFeatureExtractor'),
('mobilenet_v1', 'MobileNetV1FeatureExtractor'),
('mobilenet_v2', 'MobileNetV2FeatureExtractor'),
('mobilevit', 'MobileViTFeatureExtractor'),
('nat', 'ViTFeatureExtractor'),
('owlvit', 'OwlViTFeatureExtractor'),
('perceiver', 'PerceiverFeatureExtractor'),
('poolformer', 'PoolFormerFeatureExtractor'),
('regnet', 'ConvNextFeatureExtractor'),
('resnet', 'ConvNextFeatureExtractor'),
('segformer', 'SegformerFeatureExtractor'),
('sew', 'Wav2Vec2FeatureExtractor'),
('sew-d', 'Wav2Vec2FeatureExtractor'),
('speech_to_text', 'Speech2TextFeatureExtractor'),
('speecht5', 'SpeechT5FeatureExtractor'),
('swiftformer', 'ViTFeatureExtractor'),
('swin', 'ViTFeatureExtractor'),
('swinv2', 'ViTFeatureExtractor'),
('table-transformer', 'DetrFeatureExtractor'),
('timesformer', 'VideoMAEFeatureExtractor'),
('tvlt', 'TvltFeatureExtractor'),
('unispeech', 'Wav2Vec2FeatureExtractor'),
('unispeech-sat', 'Wav2Vec2FeatureExtractor'),
('van', 'ConvNextFeatureExtractor'),
('videomae', 'VideoMAEFeatureExtractor'),
('vilt', 'ViltFeatureExtractor'),
('vit', 'ViTFeatureExtractor'),
('vit_mae', 'ViTFeatureExtractor'),
('vit_msn', 'ViTFeatureExtractor'),
('wav2vec2', 'Wav2Vec2FeatureExtractor'),
('wav2vec2-conformer', 'Wav2Vec2FeatureExtractor'),
('wavlm', 'Wav2Vec2FeatureExtractor'),
('whisper', 'WhisperFeatureExtractor'),
('xclip', 'CLIPFeatureExtractor'),
('yolos', 'YolosFeatureExtractor'),
]
)
__lowerCAmelCase : Optional[Any] = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FEATURE_EXTRACTOR_MAPPING_NAMES)
def a__ ( A_ ):
'''simple docstring'''
for module_name, extractors in FEATURE_EXTRACTOR_MAPPING_NAMES.items():
if class_name in extractors:
__magic_name__ = model_type_to_module_name(A_ )
__magic_name__ = importlib.import_module(f'''.{module_name}''', """transformers.models""" )
try:
return getattr(A_, A_ )
except AttributeError:
continue
for _, extractor in FEATURE_EXTRACTOR_MAPPING._extra_content.items():
if getattr(A_, """__name__""", A_ ) == class_name:
return extractor
# We did not fine the class, but maybe it's because a dep is missing. In that case, the class will be in the main
# init and we return the proper dummy to get an appropriate error message.
__magic_name__ = importlib.import_module("""transformers""" )
if hasattr(A_, A_ ):
return getattr(A_, A_ )
return None
def a__ ( A_, A_ = None, A_ = False, A_ = False, A_ = None, A_ = None, A_ = None, A_ = False, **A_, ):
'''simple docstring'''
__magic_name__ = get_file_from_repo(
A_, A_, cache_dir=A_, force_download=A_, resume_download=A_, proxies=A_, use_auth_token=A_, revision=A_, local_files_only=A_, )
if resolved_config_file is None:
logger.info(
"""Could not locate the feature extractor configuration file, will try to use the model config instead.""" )
return {}
with open(A_, encoding="""utf-8""" ) as reader:
return json.load(A_ )
class UpperCAmelCase_ :
'''simple docstring'''
def __init__( self : Dict ) -> Optional[int]:
"""simple docstring"""
raise EnvironmentError(
"""AutoFeatureExtractor is designed to be instantiated """
"""using the `AutoFeatureExtractor.from_pretrained(pretrained_model_name_or_path)` method.""" )
@classmethod
@replace_list_option_in_docstrings(UpperCamelCase__ )
def _lowercase ( cls : Optional[int] , UpperCamelCase__ : Any , **UpperCamelCase__ : Optional[int] ) -> str:
"""simple docstring"""
__magic_name__ = kwargs.pop("""config""" , UpperCamelCase__ )
__magic_name__ = kwargs.pop("""trust_remote_code""" , UpperCamelCase__ )
__magic_name__ = True
__magic_name__ , __magic_name__ = FeatureExtractionMixin.get_feature_extractor_dict(UpperCamelCase__ , **UpperCamelCase__ )
__magic_name__ = config_dict.get("""feature_extractor_type""" , UpperCamelCase__ )
__magic_name__ = None
if "AutoFeatureExtractor" in config_dict.get("""auto_map""" , {} ):
__magic_name__ = config_dict["""auto_map"""]["""AutoFeatureExtractor"""]
# If we don't find the feature extractor class in the feature extractor config, let's try the model config.
if feature_extractor_class is None and feature_extractor_auto_map is None:
if not isinstance(UpperCamelCase__ , UpperCamelCase__ ):
__magic_name__ = AutoConfig.from_pretrained(UpperCamelCase__ , **UpperCamelCase__ )
# It could be in `config.feature_extractor_type``
__magic_name__ = getattr(UpperCamelCase__ , """feature_extractor_type""" , UpperCamelCase__ )
if hasattr(UpperCamelCase__ , """auto_map""" ) and "AutoFeatureExtractor" in config.auto_map:
__magic_name__ = config.auto_map["""AutoFeatureExtractor"""]
if feature_extractor_class is not None:
__magic_name__ = feature_extractor_class_from_name(UpperCamelCase__ )
__magic_name__ = feature_extractor_auto_map is not None
__magic_name__ = feature_extractor_class is not None or type(UpperCamelCase__ ) in FEATURE_EXTRACTOR_MAPPING
__magic_name__ = resolve_trust_remote_code(
UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
if has_remote_code and trust_remote_code:
__magic_name__ = get_class_from_dynamic_module(
UpperCamelCase__ , UpperCamelCase__ , **UpperCamelCase__ )
__magic_name__ = kwargs.pop("""code_revision""" , UpperCamelCase__ )
if os.path.isdir(UpperCamelCase__ ):
feature_extractor_class.register_for_auto_class()
return feature_extractor_class.from_dict(UpperCamelCase__ , **UpperCamelCase__ )
elif feature_extractor_class is not None:
return feature_extractor_class.from_dict(UpperCamelCase__ , **UpperCamelCase__ )
# Last try: we use the FEATURE_EXTRACTOR_MAPPING.
elif type(UpperCamelCase__ ) in FEATURE_EXTRACTOR_MAPPING:
__magic_name__ = FEATURE_EXTRACTOR_MAPPING[type(UpperCamelCase__ )]
return feature_extractor_class.from_dict(UpperCamelCase__ , **UpperCamelCase__ )
raise ValueError(
F'''Unrecognized feature extractor in {pretrained_model_name_or_path}. Should have a '''
F'''`feature_extractor_type` key in its {FEATURE_EXTRACTOR_NAME} of {CONFIG_NAME}, or one of the following '''
F'''`model_type` keys in its {CONFIG_NAME}: {', '.join(c for c in FEATURE_EXTRACTOR_MAPPING_NAMES.keys() )}''' )
@staticmethod
def _lowercase ( UpperCamelCase__ : Any , UpperCamelCase__ : Optional[Any] ) -> Dict:
"""simple docstring"""
FEATURE_EXTRACTOR_MAPPING.register(UpperCamelCase__ , UpperCamelCase__ )
| 529 | 1 |
import argparse
import logging
import os
import sys
import numpy as np
import onnxruntime
import torch
from bart_onnx.generation_onnx import BARTBeamSearchGenerator
from bart_onnx.reduce_onnx_size import remove_dup_initializers
import transformers
from transformers import BartForConditionalGeneration, BartTokenizer
logging.basicConfig(
format='''%(asctime)s | %(levelname)s | %(name)s | [%(filename)s:%(lineno)d] %(message)s''',
datefmt='''%Y-%m-%d %H:%M:%S''',
level=os.environ.get('''LOGLEVEL''', '''INFO''').upper(),
stream=sys.stdout,
)
A__ = logging.getLogger(__name__)
A__ = {'''facebook/bart-base''': BartForConditionalGeneration}
A__ = {'''facebook/bart-base''': BartTokenizer}
def _lowerCAmelCase ( ) -> Optional[int]:
"""simple docstring"""
snake_case__ : List[str] = argparse.ArgumentParser(description='''Export Bart model + Beam Search to ONNX graph.''' )
parser.add_argument(
'''--validation_file''' , type=__lowerCAmelCase , default=__lowerCAmelCase , help='''A csv or a json file containing the validation data.''' )
parser.add_argument(
'''--max_length''' , type=__lowerCAmelCase , default=5 , help='''The maximum total input sequence length after tokenization.''' , )
parser.add_argument(
'''--num_beams''' , type=__lowerCAmelCase , default=__lowerCAmelCase , help=(
'''Number of beams to use for evaluation. This argument will be '''
'''passed to ``model.generate``, which is used during ``evaluate`` and ``predict``.'''
) , )
parser.add_argument(
'''--model_name_or_path''' , type=__lowerCAmelCase , help='''Path to pretrained model or model identifier from huggingface.co/models.''' , required=__lowerCAmelCase , )
parser.add_argument(
'''--config_name''' , type=__lowerCAmelCase , default=__lowerCAmelCase , help='''Pretrained config name or path if not the same as model_name''' , )
parser.add_argument(
'''--device''' , type=__lowerCAmelCase , default='''cpu''' , help='''Device where the model will be run''' , )
parser.add_argument('''--output_file_path''' , type=__lowerCAmelCase , default=__lowerCAmelCase , help='''Where to store the final ONNX file.''' )
snake_case__ : List[Any] = parser.parse_args()
return args
def _lowerCAmelCase ( __lowerCAmelCase , __lowerCAmelCase="cpu" ) -> Any:
"""simple docstring"""
snake_case__ : Any = model_dict[model_name].from_pretrained(__lowerCAmelCase ).to(__lowerCAmelCase )
snake_case__ : Dict = tokenizer_dict[model_name].from_pretrained(__lowerCAmelCase )
if model_name in ["facebook/bart-base"]:
snake_case__ : Optional[Any] = 0
snake_case__ : Optional[Any] = None
snake_case__ : str = 0
return huggingface_model, tokenizer
def _lowerCAmelCase ( __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) -> List[Any]:
"""simple docstring"""
model.eval()
snake_case__ : Tuple = None
snake_case__ : Optional[int] = torch.jit.script(BARTBeamSearchGenerator(__lowerCAmelCase ) )
with torch.no_grad():
snake_case__ : Any = '''My friends are cool but they eat too many carbs.'''
snake_case__ : Optional[int] = tokenizer([ARTICLE_TO_SUMMARIZE] , max_length=1024 , return_tensors='''pt''' ).to(model.device )
snake_case__ : Tuple = model.generate(
inputs['''input_ids'''] , attention_mask=inputs['''attention_mask'''] , num_beams=__lowerCAmelCase , max_length=__lowerCAmelCase , early_stopping=__lowerCAmelCase , decoder_start_token_id=model.config.decoder_start_token_id , )
torch.onnx.export(
__lowerCAmelCase , (
inputs['''input_ids'''],
inputs['''attention_mask'''],
num_beams,
max_length,
model.config.decoder_start_token_id,
) , __lowerCAmelCase , opset_version=14 , input_names=['''input_ids''', '''attention_mask''', '''num_beams''', '''max_length''', '''decoder_start_token_id'''] , output_names=['''output_ids'''] , dynamic_axes={
'''input_ids''': {0: '''batch''', 1: '''seq'''},
'''output_ids''': {0: '''batch''', 1: '''seq_out'''},
} , example_outputs=__lowerCAmelCase , )
logger.info('''Model exported to {}'''.format(__lowerCAmelCase ) )
snake_case__ : List[Any] = remove_dup_initializers(os.path.abspath(__lowerCAmelCase ) )
logger.info('''Deduplicated and optimized model written to {}'''.format(__lowerCAmelCase ) )
snake_case__ : Any = onnxruntime.InferenceSession(__lowerCAmelCase )
snake_case__ : Dict = ort_sess.run(
__lowerCAmelCase , {
'''input_ids''': inputs['''input_ids'''].cpu().numpy(),
'''attention_mask''': inputs['''attention_mask'''].cpu().numpy(),
'''num_beams''': np.array(__lowerCAmelCase ),
'''max_length''': np.array(__lowerCAmelCase ),
'''decoder_start_token_id''': np.array(model.config.decoder_start_token_id ),
} , )
np.testing.assert_allclose(summary_ids.cpu().numpy() , ort_out[0] , rtol=1E-3 , atol=1E-3 )
logger.info('''Model outputs from torch and ONNX Runtime are similar.''' )
logger.info('''Success.''' )
def _lowerCAmelCase ( ) -> int:
"""simple docstring"""
snake_case__ : Optional[Any] = parse_args()
snake_case__ : int = 5
snake_case__ : Tuple = 4
# Make one log on every process with the configuration for debugging.
logging.basicConfig(
format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''' , datefmt='''%m/%d/%Y %H:%M:%S''' , level=logging.INFO , )
logger.setLevel(logging.INFO )
transformers.utils.logging.set_verbosity_error()
snake_case__ : Dict = torch.device(args.device )
snake_case__ , snake_case__ : str = load_model_tokenizer(args.model_name_or_path , __lowerCAmelCase )
if model.config.decoder_start_token_id is None:
raise ValueError('''Make sure that `config.decoder_start_token_id` is correctly defined''' )
model.to(__lowerCAmelCase )
if args.max_length:
snake_case__ : Optional[int] = args.max_length
if args.num_beams:
snake_case__ : Optional[Any] = args.num_beams
if args.output_file_path:
snake_case__ : Any = args.output_file_path
else:
snake_case__ : Any = '''BART.onnx'''
logger.info('''Exporting model to ONNX''' )
export_and_validate_model(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
if __name__ == "__main__":
main()
| 219 |
import argparse
import torch
from transformers import BertConfig, BertForPreTraining, load_tf_weights_in_bert
from transformers.utils import logging
logging.set_verbosity_info()
def _lowerCAmelCase ( __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) -> Optional[int]:
"""simple docstring"""
snake_case__ : List[str] = BertConfig.from_json_file(__lowerCAmelCase )
print(f"""Building PyTorch model from configuration: {config}""" )
snake_case__ : Optional[Any] = BertForPreTraining(__lowerCAmelCase )
# Load weights from tf checkpoint
load_tf_weights_in_bert(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
# Save pytorch-model
print(f"""Save PyTorch model to {pytorch_dump_path}""" )
torch.save(model.state_dict() , __lowerCAmelCase )
if __name__ == "__main__":
A__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--tf_checkpoint_path''', default=None, type=str, required=True, help='''Path to the TensorFlow checkpoint path.'''
)
parser.add_argument(
'''--bert_config_file''',
default=None,
type=str,
required=True,
help=(
'''The config json file corresponding to the pre-trained BERT model. \n'''
'''This specifies the model architecture.'''
),
)
parser.add_argument(
'''--pytorch_dump_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
A__ = parser.parse_args()
convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.bert_config_file, args.pytorch_dump_path)
| 219 | 1 |
"""simple docstring"""
def _lowerCAmelCase ( lowerCamelCase__ : str ) -> List[str]:
return " ".join(
"".join(word[::-1] ) if len(lowerCAmelCase__ ) > 4 else word for word in sentence.split() )
if __name__ == "__main__":
import doctest
doctest.testmod()
print(reverse_long_words('''Hey wollef sroirraw'''))
| 572 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__SCREAMING_SNAKE_CASE = logging.get_logger(__name__)
__SCREAMING_SNAKE_CASE = {
'caidas/swin2sr-classicalsr-x2-64': (
'https://huggingface.co/caidas/swin2sr-classicalsr-x2-64/resolve/main/config.json'
),
}
class lowerCAmelCase__ ( lowerCAmelCase_ ):
"""simple docstring"""
__UpperCamelCase = "swin2sr"
__UpperCamelCase = {
"hidden_size": "embed_dim",
"num_attention_heads": "num_heads",
"num_hidden_layers": "num_layers",
}
def __init__( self : Union[str, Any] , A__ : int=6_4 , A__ : List[Any]=1 , A__ : List[Any]=3 , A__ : Any=1_8_0 , A__ : Optional[int]=[6, 6, 6, 6, 6, 6] , A__ : Optional[int]=[6, 6, 6, 6, 6, 6] , A__ : Dict=8 , A__ : Any=2.0 , A__ : Optional[int]=True , A__ : Union[str, Any]=0.0 , A__ : Union[str, Any]=0.0 , A__ : List[str]=0.1 , A__ : Any="gelu" , A__ : Tuple=False , A__ : Optional[int]=0.02 , A__ : List[Any]=1E-5 , A__ : Any=2 , A__ : Union[str, Any]=1.0 , A__ : Dict="1conv" , A__ : Optional[Any]="pixelshuffle" , **A__ : Optional[Any] , ) -> Optional[Any]:
'''simple docstring'''
super().__init__(**A__ )
a__ : List[str] = image_size
a__ : Optional[Any] = patch_size
a__ : Dict = num_channels
a__ : Optional[int] = embed_dim
a__ : int = depths
a__ : Optional[int] = len(A__ )
a__ : Dict = num_heads
a__ : List[Any] = window_size
a__ : Optional[int] = mlp_ratio
a__ : Optional[int] = qkv_bias
a__ : Union[str, Any] = hidden_dropout_prob
a__ : Dict = attention_probs_dropout_prob
a__ : Union[str, Any] = drop_path_rate
a__ : int = hidden_act
a__ : int = use_absolute_embeddings
a__ : Dict = layer_norm_eps
a__ : List[str] = initializer_range
a__ : List[Any] = upscale
a__ : List[Any] = img_range
a__ : Optional[int] = resi_connection
a__ : int = upsampler
| 688 | 0 |
'''simple docstring'''
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import BertTokenizer, BertTokenizerFast
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import FEATURE_EXTRACTOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import ChineseCLIPImageProcessor, ChineseCLIPProcessor
@require_vision
class lowerCamelCase_ ( unittest.TestCase ):
def __lowercase ( self : Optional[int] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : Optional[Any] = tempfile.mkdtemp()
SCREAMING_SNAKE_CASE : int = [
'''[UNK]''',
'''[CLS]''',
'''[SEP]''',
'''[PAD]''',
'''[MASK]''',
'''的''',
'''价''',
'''格''',
'''是''',
'''15''',
'''便''',
'''alex''',
'''##andra''',
''',''',
'''。''',
'''-''',
'''t''',
'''shirt''',
]
SCREAMING_SNAKE_CASE : Optional[Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as vocab_writer:
vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens] ) )
SCREAMING_SNAKE_CASE : Optional[Any] = {
'''do_resize''': True,
'''size''': {'''height''': 2_24, '''width''': 2_24},
'''do_center_crop''': True,
'''crop_size''': {'''height''': 18, '''width''': 18},
'''do_normalize''': True,
'''image_mean''': [0.4814_5466, 0.457_8275, 0.4082_1073],
'''image_std''': [0.2686_2954, 0.2613_0258, 0.2757_7711],
'''do_convert_rgb''': True,
}
SCREAMING_SNAKE_CASE : Union[str, Any] = os.path.join(self.tmpdirname , lowerCAmelCase__ )
with open(self.image_processor_file , '''w''' , encoding='''utf-8''' ) as fp:
json.dump(lowerCAmelCase__ , lowerCAmelCase__ )
def __lowercase ( self : str , **lowerCAmelCase__ : int ):
"""simple docstring"""
return BertTokenizer.from_pretrained(self.tmpdirname , **lowerCAmelCase__ )
def __lowercase ( self : Any , **lowerCAmelCase__ : Dict ):
"""simple docstring"""
return BertTokenizerFast.from_pretrained(self.tmpdirname , **lowerCAmelCase__ )
def __lowercase ( self : List[Any] , **lowerCAmelCase__ : Dict ):
"""simple docstring"""
return ChineseCLIPImageProcessor.from_pretrained(self.tmpdirname , **lowerCAmelCase__ )
def __lowercase ( self : List[Any] ):
"""simple docstring"""
shutil.rmtree(self.tmpdirname )
def __lowercase ( self : int ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : Dict = [np.random.randint(2_55 , size=(3, 30, 4_00) , dtype=np.uinta )]
SCREAMING_SNAKE_CASE : Optional[Any] = [Image.fromarray(np.moveaxis(lowerCAmelCase__ , 0 , -1 ) ) for x in image_inputs]
return image_inputs
def __lowercase ( self : int ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : Dict = self.get_tokenizer()
SCREAMING_SNAKE_CASE : str = self.get_rust_tokenizer()
SCREAMING_SNAKE_CASE : Union[str, Any] = self.get_image_processor()
SCREAMING_SNAKE_CASE : Dict = ChineseCLIPProcessor(tokenizer=lowerCAmelCase__ , image_processor=lowerCAmelCase__ )
processor_slow.save_pretrained(self.tmpdirname )
SCREAMING_SNAKE_CASE : List[str] = ChineseCLIPProcessor.from_pretrained(self.tmpdirname , use_fast=lowerCAmelCase__ )
SCREAMING_SNAKE_CASE : int = ChineseCLIPProcessor(tokenizer=lowerCAmelCase__ , image_processor=lowerCAmelCase__ )
processor_fast.save_pretrained(self.tmpdirname )
SCREAMING_SNAKE_CASE : Union[str, Any] = ChineseCLIPProcessor.from_pretrained(self.tmpdirname )
self.assertEqual(processor_slow.tokenizer.get_vocab() , tokenizer_slow.get_vocab() )
self.assertEqual(processor_fast.tokenizer.get_vocab() , tokenizer_fast.get_vocab() )
self.assertEqual(tokenizer_slow.get_vocab() , tokenizer_fast.get_vocab() )
self.assertIsInstance(processor_slow.tokenizer , lowerCAmelCase__ )
self.assertIsInstance(processor_fast.tokenizer , lowerCAmelCase__ )
self.assertEqual(processor_slow.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertEqual(processor_fast.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertIsInstance(processor_slow.image_processor , lowerCAmelCase__ )
self.assertIsInstance(processor_fast.image_processor , lowerCAmelCase__ )
def __lowercase ( self : Tuple ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : Optional[int] = ChineseCLIPProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
SCREAMING_SNAKE_CASE : Optional[Any] = self.get_tokenizer(cls_token='''(CLS)''' , sep_token='''(SEP)''' )
SCREAMING_SNAKE_CASE : Optional[Any] = self.get_image_processor(do_normalize=lowerCAmelCase__ )
SCREAMING_SNAKE_CASE : Union[str, Any] = ChineseCLIPProcessor.from_pretrained(
self.tmpdirname , cls_token='''(CLS)''' , sep_token='''(SEP)''' , do_normalize=lowerCAmelCase__ )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer , lowerCAmelCase__ )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , lowerCAmelCase__ )
def __lowercase ( self : Optional[Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : Any = self.get_image_processor()
SCREAMING_SNAKE_CASE : str = self.get_tokenizer()
SCREAMING_SNAKE_CASE : Tuple = ChineseCLIPProcessor(tokenizer=lowerCAmelCase__ , image_processor=lowerCAmelCase__ )
SCREAMING_SNAKE_CASE : Tuple = self.prepare_image_inputs()
SCREAMING_SNAKE_CASE : List[Any] = image_processor(lowerCAmelCase__ , return_tensors='''np''' )
SCREAMING_SNAKE_CASE : List[Any] = processor(images=lowerCAmelCase__ , return_tensors='''np''' )
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1e-2 )
def __lowercase ( self : Dict ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : Optional[Any] = self.get_image_processor()
SCREAMING_SNAKE_CASE : Union[str, Any] = self.get_tokenizer()
SCREAMING_SNAKE_CASE : Tuple = ChineseCLIPProcessor(tokenizer=lowerCAmelCase__ , image_processor=lowerCAmelCase__ )
SCREAMING_SNAKE_CASE : int = '''Alexandra,T-shirt的价格是15便士。'''
SCREAMING_SNAKE_CASE : Union[str, Any] = processor(text=lowerCAmelCase__ )
SCREAMING_SNAKE_CASE : List[str] = tokenizer(lowerCAmelCase__ )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
def __lowercase ( self : Optional[int] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : List[Any] = self.get_image_processor()
SCREAMING_SNAKE_CASE : List[Any] = self.get_tokenizer()
SCREAMING_SNAKE_CASE : Tuple = ChineseCLIPProcessor(tokenizer=lowerCAmelCase__ , image_processor=lowerCAmelCase__ )
SCREAMING_SNAKE_CASE : str = '''Alexandra,T-shirt的价格是15便士。'''
SCREAMING_SNAKE_CASE : Tuple = self.prepare_image_inputs()
SCREAMING_SNAKE_CASE : List[str] = processor(text=lowerCAmelCase__ , images=lowerCAmelCase__ )
self.assertListEqual(list(inputs.keys() ) , ['''input_ids''', '''token_type_ids''', '''attention_mask''', '''pixel_values'''] )
# test if it raises when no input is passed
with pytest.raises(lowerCAmelCase__ ):
processor()
def __lowercase ( self : Dict ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : Tuple = self.get_image_processor()
SCREAMING_SNAKE_CASE : List[Any] = self.get_tokenizer()
SCREAMING_SNAKE_CASE : Tuple = ChineseCLIPProcessor(tokenizer=lowerCAmelCase__ , image_processor=lowerCAmelCase__ )
SCREAMING_SNAKE_CASE : List[Any] = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
SCREAMING_SNAKE_CASE : Any = processor.batch_decode(lowerCAmelCase__ )
SCREAMING_SNAKE_CASE : Tuple = tokenizer.batch_decode(lowerCAmelCase__ )
self.assertListEqual(lowerCAmelCase__ , lowerCAmelCase__ )
def __lowercase ( self : Dict ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : str = self.get_image_processor()
SCREAMING_SNAKE_CASE : List[str] = self.get_tokenizer()
SCREAMING_SNAKE_CASE : Dict = ChineseCLIPProcessor(tokenizer=lowerCAmelCase__ , image_processor=lowerCAmelCase__ )
SCREAMING_SNAKE_CASE : int = '''Alexandra,T-shirt的价格是15便士。'''
SCREAMING_SNAKE_CASE : List[Any] = self.prepare_image_inputs()
SCREAMING_SNAKE_CASE : Any = processor(text=lowerCAmelCase__ , images=lowerCAmelCase__ )
self.assertListEqual(list(inputs.keys() ) , processor.model_input_names )
| 464 |
'''simple docstring'''
from collections import deque
class lowerCamelCase_ :
def __init__( self : Tuple , lowerCAmelCase__ : str , lowerCAmelCase__ : int , lowerCAmelCase__ : int ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : Optional[Any] = process_name # process name
SCREAMING_SNAKE_CASE : int = arrival_time # arrival time of the process
# completion time of finished process or last interrupted time
SCREAMING_SNAKE_CASE : List[str] = arrival_time
SCREAMING_SNAKE_CASE : List[Any] = burst_time # remaining burst time
SCREAMING_SNAKE_CASE : List[str] = 0 # total time of the process wait in ready queue
SCREAMING_SNAKE_CASE : Optional[int] = 0 # time from arrival time to completion time
class lowerCamelCase_ :
def __init__( self : str , lowerCAmelCase__ : int , lowerCAmelCase__ : list[int] , lowerCAmelCase__ : deque[Process] , lowerCAmelCase__ : int , ):
"""simple docstring"""
# total number of mlfq's queues
SCREAMING_SNAKE_CASE : str = number_of_queues
# time slice of queues that round robin algorithm applied
SCREAMING_SNAKE_CASE : int = time_slices
# unfinished process is in this ready_queue
SCREAMING_SNAKE_CASE : str = queue
# current time
SCREAMING_SNAKE_CASE : Any = current_time
# finished process is in this sequence queue
SCREAMING_SNAKE_CASE : deque[Process] = deque()
def __lowercase ( self : Optional[int] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : List[str] = []
for i in range(len(self.finish_queue ) ):
sequence.append(self.finish_queue[i].process_name )
return sequence
def __lowercase ( self : int , lowerCAmelCase__ : list[Process] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : Union[str, Any] = []
for i in range(len(lowerCAmelCase__ ) ):
waiting_times.append(queue[i].waiting_time )
return waiting_times
def __lowercase ( self : List[Any] , lowerCAmelCase__ : list[Process] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : Dict = []
for i in range(len(lowerCAmelCase__ ) ):
turnaround_times.append(queue[i].turnaround_time )
return turnaround_times
def __lowercase ( self : Dict , lowerCAmelCase__ : list[Process] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : Union[str, Any] = []
for i in range(len(lowerCAmelCase__ ) ):
completion_times.append(queue[i].stop_time )
return completion_times
def __lowercase ( self : Union[str, Any] , lowerCAmelCase__ : deque[Process] ):
"""simple docstring"""
return [q.burst_time for q in queue]
def __lowercase ( self : Optional[int] , lowerCAmelCase__ : Process ):
"""simple docstring"""
process.waiting_time += self.current_time - process.stop_time
return process.waiting_time
def __lowercase ( self : Dict , lowerCAmelCase__ : deque[Process] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : deque[Process] = deque() # sequence deque of finished process
while len(lowerCAmelCase__ ) != 0:
SCREAMING_SNAKE_CASE : Tuple = ready_queue.popleft() # current process
# if process's arrival time is later than current time, update current time
if self.current_time < cp.arrival_time:
self.current_time += cp.arrival_time
# update waiting time of current process
self.update_waiting_time(lowerCAmelCase__ )
# update current time
self.current_time += cp.burst_time
# finish the process and set the process's burst-time 0
SCREAMING_SNAKE_CASE : Optional[Any] = 0
# set the process's turnaround time because it is finished
SCREAMING_SNAKE_CASE : Union[str, Any] = self.current_time - cp.arrival_time
# set the completion time
SCREAMING_SNAKE_CASE : Union[str, Any] = self.current_time
# add the process to queue that has finished queue
finished.append(lowerCAmelCase__ )
self.finish_queue.extend(lowerCAmelCase__ ) # add finished process to finish queue
# FCFS will finish all remaining processes
return finished
def __lowercase ( self : Optional[int] , lowerCAmelCase__ : deque[Process] , lowerCAmelCase__ : int ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : deque[Process] = deque() # sequence deque of terminated process
# just for 1 cycle and unfinished processes will go back to queue
for _ in range(len(lowerCAmelCase__ ) ):
SCREAMING_SNAKE_CASE : List[Any] = ready_queue.popleft() # current process
# if process's arrival time is later than current time, update current time
if self.current_time < cp.arrival_time:
self.current_time += cp.arrival_time
# update waiting time of unfinished processes
self.update_waiting_time(lowerCAmelCase__ )
# if the burst time of process is bigger than time-slice
if cp.burst_time > time_slice:
# use CPU for only time-slice
self.current_time += time_slice
# update remaining burst time
cp.burst_time -= time_slice
# update end point time
SCREAMING_SNAKE_CASE : Union[str, Any] = self.current_time
# locate the process behind the queue because it is not finished
ready_queue.append(lowerCAmelCase__ )
else:
# use CPU for remaining burst time
self.current_time += cp.burst_time
# set burst time 0 because the process is finished
SCREAMING_SNAKE_CASE : Optional[Any] = 0
# set the finish time
SCREAMING_SNAKE_CASE : Tuple = self.current_time
# update the process' turnaround time because it is finished
SCREAMING_SNAKE_CASE : List[Any] = self.current_time - cp.arrival_time
# add the process to queue that has finished queue
finished.append(lowerCAmelCase__ )
self.finish_queue.extend(lowerCAmelCase__ ) # add finished process to finish queue
# return finished processes queue and remaining processes queue
return finished, ready_queue
def __lowercase ( self : Union[str, Any] ):
"""simple docstring"""
# all queues except last one have round_robin algorithm
for i in range(self.number_of_queues - 1 ):
SCREAMING_SNAKE_CASE ,SCREAMING_SNAKE_CASE : Optional[int] = self.round_robin(
self.ready_queue , self.time_slices[i] )
# the last queue has first_come_first_served algorithm
self.first_come_first_served(self.ready_queue )
return self.finish_queue
if __name__ == "__main__":
import doctest
lowerCAmelCase_ : str = Process('P1', 0, 53)
lowerCAmelCase_ : Optional[int] = Process('P2', 0, 17)
lowerCAmelCase_ : Optional[Any] = Process('P3', 0, 68)
lowerCAmelCase_ : Optional[int] = Process('P4', 0, 24)
lowerCAmelCase_ : List[str] = 3
lowerCAmelCase_ : str = [17, 25]
lowerCAmelCase_ : Optional[Any] = deque([Pa, Pa, Pa, Pa])
if len(time_slices) != number_of_queues - 1:
raise SystemExit(0)
doctest.testmod(extraglobs={'queue': deque([Pa, Pa, Pa, Pa])})
lowerCAmelCase_ : Tuple = Process('P1', 0, 53)
lowerCAmelCase_ : int = Process('P2', 0, 17)
lowerCAmelCase_ : Union[str, Any] = Process('P3', 0, 68)
lowerCAmelCase_ : Any = Process('P4', 0, 24)
lowerCAmelCase_ : int = 3
lowerCAmelCase_ : Optional[Any] = [17, 25]
lowerCAmelCase_ : Dict = deque([Pa, Pa, Pa, Pa])
lowerCAmelCase_ : Union[str, Any] = MLFQ(number_of_queues, time_slices, queue, 0)
lowerCAmelCase_ : List[Any] = mlfq.multi_level_feedback_queue()
# print total waiting times of processes(P1, P2, P3, P4)
print(
f'waiting time:\
\t\t\t{MLFQ.calculate_waiting_time(mlfq, [Pa, Pa, Pa, Pa])}'
)
# print completion times of processes(P1, P2, P3, P4)
print(
f'completion time:\
\t\t{MLFQ.calculate_completion_time(mlfq, [Pa, Pa, Pa, Pa])}'
)
# print total turnaround times of processes(P1, P2, P3, P4)
print(
f'turnaround time:\
\t\t{MLFQ.calculate_turnaround_time(mlfq, [Pa, Pa, Pa, Pa])}'
)
# print sequence of finished processes
print(
f'sequence of finished processes:\
{mlfq.calculate_sequence_of_finish_queue()}'
)
| 464 | 1 |
'''simple docstring'''
from .glue import glue_convert_examples_to_features, glue_output_modes, glue_processors, glue_tasks_num_labels
from .squad import SquadExample, SquadFeatures, SquadVaProcessor, SquadVaProcessor, squad_convert_examples_to_features
from .utils import DataProcessor, InputExample, InputFeatures, SingleSentenceClassificationProcessor
from .xnli import xnli_output_modes, xnli_processors, xnli_tasks_num_labels | 189 |
'''simple docstring'''
import logging
from dataclasses import dataclass, field
from typing import Optional
from seqaseq_trainer import arg_to_scheduler
from transformers import TrainingArguments
__snake_case = logging.getLogger(__name__)
@dataclass
class lowercase ( A__ ):
"""simple docstring"""
_a = field(
default=0.0 , metadata={'help': 'The label smoothing epsilon to apply (if not zero).'} )
_a = field(default=A__ , metadata={'help': 'Whether to SortishSamler or not.'} )
_a = field(
default=A__ , metadata={'help': 'Whether to use generate to calculate generative metrics (ROUGE, BLEU).'} )
_a = field(default=A__ , metadata={'help': 'whether to use adafactor'} )
_a = field(
default=A__ , metadata={'help': 'Encoder layer dropout probability. Goes into model.config.'} )
_a = field(
default=A__ , metadata={'help': 'Decoder layer dropout probability. Goes into model.config.'} )
_a = field(default=A__ , metadata={'help': 'Dropout probability. Goes into model.config.'} )
_a = field(
default=A__ , metadata={'help': 'Attention dropout probability. Goes into model.config.'} )
_a = field(
default='linear' , metadata={'help': f'''Which lr scheduler to use. Selected in {sorted(arg_to_scheduler.keys() )}'''} , ) | 189 | 1 |
"""simple docstring"""
import json
import logging
import os
import re
import sys
from dataclasses import dataclass, field
from typing import Any, Dict, List, Optional, Union
import datasets
import numpy as np
import torch
import torchaudio
from packaging import version
from torch import nn
import transformers
from transformers import (
HfArgumentParser,
Trainer,
TrainingArguments,
WavaVecaCTCTokenizer,
WavaVecaFeatureExtractor,
WavaVecaForCTC,
WavaVecaProcessor,
is_apex_available,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint, is_main_process
if is_apex_available():
from apex import amp
if version.parse(version.parse(torch.__version__).base_version) >= version.parse("""1.6"""):
A = True
from torch.cuda.amp import autocast
A = logging.getLogger(__name__)
def UpperCamelCase_ ( lowerCamelCase : List[Any]=None , lowerCamelCase : Tuple=None ) -> Any:
"""simple docstring"""
return field(default_factory=lambda: default , metadata=lowerCamelCase )
@dataclass
class _UpperCamelCase :
"""simple docstring"""
snake_case_ = field(
metadata={'help': 'Path to pretrained model or model identifier from huggingface.co/models'} )
snake_case_ = field(
default=lowerCamelCase__ , metadata={'help': 'Where do you want to store the pretrained models downloaded from huggingface.co'} , )
snake_case_ = field(
default=lowerCamelCase__ , metadata={'help': 'Whether to freeze the feature extractor layers of the model.'} )
snake_case_ = field(
default=0.1 , metadata={'help': 'The dropout ratio for the attention probabilities.'} )
snake_case_ = field(
default=0.1 , metadata={'help': 'The dropout ratio for activations inside the fully connected layer.'} )
snake_case_ = field(
default=0.1 , metadata={
'help': 'The dropout probabilitiy for all fully connected layers in the embeddings, encoder, and pooler.'
} , )
snake_case_ = field(
default=0.1 , metadata={'help': 'The dropout probabilitiy for all 1D convolutional layers in feature extractor.'} , )
snake_case_ = field(
default=0.05 , metadata={
'help': (
'Propability of each feature vector along the time axis to be chosen as the start of the vector'
'span to be masked. Approximately ``mask_time_prob * sequence_length // mask_time_length`` feature'
'vectors will be masked along the time axis. This is only relevant if ``apply_spec_augment is True``.'
)
} , )
snake_case_ = field(default=0.0 , metadata={'help': 'The LayerDrop probability.'} )
@dataclass
class _UpperCamelCase :
"""simple docstring"""
snake_case_ = field(
default=lowerCamelCase__ , metadata={'help': 'The configuration name of the dataset to use (via the datasets library).'} )
snake_case_ = field(
default='train+validation' , metadata={
'help': 'The name of the training data set split to use (via the datasets library). Defaults to \'train\''
} , )
snake_case_ = field(
default=lowerCamelCase__ , metadata={'help': 'Overwrite the cached preprocessed datasets or not.'} )
snake_case_ = field(
default=lowerCamelCase__ , metadata={'help': 'The number of processes to use for the preprocessing.'} , )
snake_case_ = field(
default=lowerCamelCase__ , metadata={
'help': (
'For debugging purposes or quicker training, truncate the number of training examples to this '
'value if set.'
)
} , )
snake_case_ = field(
default=lowerCamelCase__ , metadata={
'help': (
'For debugging purposes or quicker training, truncate the number of validation examples to this '
'value if set.'
)
} , )
snake_case_ = list_field(
default=[',', '?', '.', '!', '-', ';', ':', '""', '%', '\'', '"', '�'] , metadata={'help': 'A list of characters to remove from the transcripts.'} , )
@dataclass
class _UpperCamelCase :
"""simple docstring"""
snake_case_ = 4_2
snake_case_ = True
snake_case_ = None
snake_case_ = None
snake_case_ = None
snake_case_ = None
def __call__( self : Union[str, Any] , snake_case : List[Dict[str, Union[List[int], torch.Tensor]]] ) -> Dict[str, torch.Tensor]:
'''simple docstring'''
__magic_name__ : Tuple = [{'''input_values''': feature['''input_values''']} for feature in features]
__magic_name__ : Tuple = [{'''input_ids''': feature['''labels''']} for feature in features]
__magic_name__ : int = self.processor.pad(
snake_case , padding=self.padding , max_length=self.max_length , pad_to_multiple_of=self.pad_to_multiple_of , return_tensors='''pt''' , )
__magic_name__ : Tuple = self.processor.pad(
labels=snake_case , padding=self.padding , max_length=self.max_length_labels , pad_to_multiple_of=self.pad_to_multiple_of_labels , return_tensors='''pt''' , )
# replace padding with -100 to ignore loss correctly
__magic_name__ : List[str] = labels_batch['''input_ids'''].masked_fill(labels_batch.attention_mask.ne(1 ) , -100 )
__magic_name__ : Optional[Any] = labels
return batch
class _UpperCamelCase ( lowerCamelCase__ ):
"""simple docstring"""
def _UpperCAmelCase ( self : List[Any] , snake_case : nn.Module , snake_case : Dict[str, Union[torch.Tensor, Any]] ) -> torch.Tensor:
'''simple docstring'''
model.train()
__magic_name__ : Optional[int] = self._prepare_inputs(snake_case )
if self.use_amp:
with autocast():
__magic_name__ : List[Any] = self.compute_loss(snake_case , snake_case )
else:
__magic_name__ : str = self.compute_loss(snake_case , snake_case )
if self.args.n_gpu > 1:
if model.module.config.ctc_loss_reduction == "mean":
__magic_name__ : str = loss.mean()
elif model.module.config.ctc_loss_reduction == "sum":
__magic_name__ : Tuple = loss.sum() / (inputs['''labels'''] >= 0).sum()
else:
raise ValueError(f"""{model.config.ctc_loss_reduction} is not valid. Choose one of ['mean', 'sum']""" )
if self.args.gradient_accumulation_steps > 1:
__magic_name__ : Any = loss / self.args.gradient_accumulation_steps
if self.use_amp:
self.scaler.scale(snake_case ).backward()
elif self.use_apex:
with amp.scale_loss(snake_case , self.optimizer ) as scaled_loss:
scaled_loss.backward()
elif self.deepspeed:
self.deepspeed.backward(snake_case )
else:
loss.backward()
return loss.detach()
def UpperCamelCase_ ( ) -> str:
"""simple docstring"""
__magic_name__ : Optional[int] = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
if len(sys.argv ) == 2 and sys.argv[1].endswith('''.json''' ):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
__magic_name__ : Any = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
else:
__magic_name__ : Any = parser.parse_args_into_dataclasses()
# Detecting last checkpoint.
__magic_name__ : Optional[int] = None
if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
__magic_name__ : Tuple = get_last_checkpoint(training_args.output_dir )
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
f"""Output directory ({training_args.output_dir}) already exists and is not empty. """
'''Use --overwrite_output_dir to overcome.''' )
elif last_checkpoint is not None:
logger.info(
f"""Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change """
'''the `--output_dir` or add `--overwrite_output_dir` to train from scratch.''' )
# Setup logging
logging.basicConfig(
format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''' , datefmt='''%m/%d/%Y %H:%M:%S''' , handlers=[logging.StreamHandler(sys.stdout )] , )
logger.setLevel(logging.INFO if is_main_process(training_args.local_rank ) else logging.WARN )
# Log on each process the small summary:
logger.warning(
f"""Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}"""
+ f"""distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}""" )
# Set the verbosity to info of the Transformers logger (on main process only):
if is_main_process(training_args.local_rank ):
transformers.utils.logging.set_verbosity_info()
logger.info('''Training/evaluation parameters %s''' , lowerCamelCase )
# Set seed before initializing model.
set_seed(training_args.seed )
# Get the datasets:
__magic_name__ : Any = datasets.load_dataset(
'''common_voice''' , data_args.dataset_config_name , split=data_args.train_split_name )
__magic_name__ : Optional[int] = datasets.load_dataset('''common_voice''' , data_args.dataset_config_name , split='''test''' )
# Create and save tokenizer
__magic_name__ : str = f"""[{''.join(data_args.chars_to_ignore )}]"""
def remove_special_characters(lowerCamelCase : str ):
__magic_name__ : Dict = re.sub(lowerCamelCase , '''''' , batch['''sentence'''] ).lower() + ''' '''
return batch
__magic_name__ : List[Any] = train_dataset.map(lowerCamelCase , remove_columns=['''sentence'''] )
__magic_name__ : Tuple = eval_dataset.map(lowerCamelCase , remove_columns=['''sentence'''] )
def extract_all_chars(lowerCamelCase : Dict ):
__magic_name__ : Optional[Any] = ''' '''.join(batch['''text'''] )
__magic_name__ : str = list(set(lowerCamelCase ) )
return {"vocab": [vocab], "all_text": [all_text]}
__magic_name__ : int = train_dataset.map(
lowerCamelCase , batched=lowerCamelCase , batch_size=-1 , keep_in_memory=lowerCamelCase , remove_columns=train_dataset.column_names , )
__magic_name__ : Any = train_dataset.map(
lowerCamelCase , batched=lowerCamelCase , batch_size=-1 , keep_in_memory=lowerCamelCase , remove_columns=eval_dataset.column_names , )
__magic_name__ : Any = list(set(vocab_train['''vocab'''][0] ) | set(vocab_test['''vocab'''][0] ) )
__magic_name__ : Tuple = {v: k for k, v in enumerate(lowerCamelCase )}
__magic_name__ : int = vocab_dict[''' ''']
del vocab_dict[" "]
__magic_name__ : int = len(lowerCamelCase )
__magic_name__ : Any = len(lowerCamelCase )
with open('''vocab.json''' , '''w''' ) as vocab_file:
json.dump(lowerCamelCase , lowerCamelCase )
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
__magic_name__ : Dict = WavaVecaCTCTokenizer(
'''vocab.json''' , unk_token='''[UNK]''' , pad_token='''[PAD]''' , word_delimiter_token='''|''' , )
__magic_name__ : List[str] = WavaVecaFeatureExtractor(
feature_size=1 , sampling_rate=1_6000 , padding_value=0.0 , do_normalize=lowerCamelCase , return_attention_mask=lowerCamelCase )
__magic_name__ : List[str] = WavaVecaProcessor(feature_extractor=lowerCamelCase , tokenizer=lowerCamelCase )
__magic_name__ : Optional[int] = WavaVecaForCTC.from_pretrained(
model_args.model_name_or_path , cache_dir=model_args.cache_dir , activation_dropout=model_args.activation_dropout , attention_dropout=model_args.attention_dropout , hidden_dropout=model_args.hidden_dropout , feat_proj_dropout=model_args.feat_proj_dropout , mask_time_prob=model_args.mask_time_prob , gradient_checkpointing=training_args.gradient_checkpointing , layerdrop=model_args.layerdrop , ctc_loss_reduction='''mean''' , pad_token_id=processor.tokenizer.pad_token_id , vocab_size=len(processor.tokenizer ) , )
if data_args.max_train_samples is not None:
__magic_name__ : Any = min(len(lowerCamelCase ) , data_args.max_train_samples )
__magic_name__ : Tuple = train_dataset.select(range(lowerCamelCase ) )
if data_args.max_val_samples is not None:
__magic_name__ : List[Any] = eval_dataset.select(range(data_args.max_val_samples ) )
__magic_name__ : str = torchaudio.transforms.Resample(4_8000 , 1_6000 )
# Preprocessing the datasets.
# We need to read the aduio files as arrays and tokenize the targets.
def speech_file_to_array_fn(lowerCamelCase : Any ):
__magic_name__ : Tuple = torchaudio.load(batch['''path'''] )
__magic_name__ : Optional[int] = resampler(lowerCamelCase ).squeeze().numpy()
__magic_name__ : Any = 1_6000
__magic_name__ : Optional[Any] = batch['''text''']
return batch
__magic_name__ : Dict = train_dataset.map(
lowerCamelCase , remove_columns=train_dataset.column_names , num_proc=data_args.preprocessing_num_workers , )
__magic_name__ : List[str] = eval_dataset.map(
lowerCamelCase , remove_columns=eval_dataset.column_names , num_proc=data_args.preprocessing_num_workers , )
def prepare_dataset(lowerCamelCase : Optional[int] ):
# check that all files have the correct sampling rate
assert (
len(set(batch['''sampling_rate'''] ) ) == 1
), f"""Make sure all inputs have the same sampling rate of {processor.feature_extractor.sampling_rate}."""
__magic_name__ : int = processor(
audio=batch['''speech'''] , text=batch['''target_text'''] , sampling_rate=batch['''sampling_rate'''][0] )
batch.update(lowerCamelCase )
return batch
__magic_name__ : Any = train_dataset.map(
lowerCamelCase , remove_columns=train_dataset.column_names , batch_size=training_args.per_device_train_batch_size , batched=lowerCamelCase , num_proc=data_args.preprocessing_num_workers , )
__magic_name__ : int = eval_dataset.map(
lowerCamelCase , remove_columns=eval_dataset.column_names , batch_size=training_args.per_device_train_batch_size , batched=lowerCamelCase , num_proc=data_args.preprocessing_num_workers , )
# Metric
__magic_name__ : Any = datasets.load_metric('''wer''' )
def compute_metrics(lowerCamelCase : Dict ):
__magic_name__ : Any = pred.predictions
__magic_name__ : Union[str, Any] = np.argmax(lowerCamelCase , axis=-1 )
__magic_name__ : Union[str, Any] = processor.tokenizer.pad_token_id
__magic_name__ : Any = processor.batch_decode(lowerCamelCase )
# we do not want to group tokens when computing the metrics
__magic_name__ : Optional[int] = processor.batch_decode(pred.label_ids , group_tokens=lowerCamelCase )
__magic_name__ : List[str] = wer_metric.compute(predictions=lowerCamelCase , references=lowerCamelCase )
return {"wer": wer}
if model_args.freeze_feature_extractor:
model.freeze_feature_extractor()
# Data collator
__magic_name__ : Optional[Any] = DataCollatorCTCWithPadding(processor=lowerCamelCase , padding=lowerCamelCase )
# Initialize our Trainer
__magic_name__ : Any = CTCTrainer(
model=lowerCamelCase , data_collator=lowerCamelCase , args=lowerCamelCase , compute_metrics=lowerCamelCase , train_dataset=train_dataset if training_args.do_train else None , eval_dataset=eval_dataset if training_args.do_eval else None , tokenizer=processor.feature_extractor , )
# Training
if training_args.do_train:
if last_checkpoint is not None:
__magic_name__ : Optional[int] = last_checkpoint
elif os.path.isdir(model_args.model_name_or_path ):
__magic_name__ : Dict = model_args.model_name_or_path
else:
__magic_name__ : Optional[Any] = None
# Save the feature_extractor and the tokenizer
if is_main_process(training_args.local_rank ):
processor.save_pretrained(training_args.output_dir )
__magic_name__ : List[Any] = trainer.train(resume_from_checkpoint=lowerCamelCase )
trainer.save_model()
__magic_name__ : Union[str, Any] = train_result.metrics
__magic_name__ : Dict = (
data_args.max_train_samples if data_args.max_train_samples is not None else len(lowerCamelCase )
)
__magic_name__ : Union[str, Any] = min(lowerCamelCase , len(lowerCamelCase ) )
trainer.log_metrics('''train''' , lowerCamelCase )
trainer.save_metrics('''train''' , lowerCamelCase )
trainer.save_state()
# Evaluation
__magic_name__ : str = {}
if training_args.do_eval:
logger.info('''*** Evaluate ***''' )
__magic_name__ : Union[str, Any] = trainer.evaluate()
__magic_name__ : Tuple = data_args.max_val_samples if data_args.max_val_samples is not None else len(lowerCamelCase )
__magic_name__ : Any = min(lowerCamelCase , len(lowerCamelCase ) )
trainer.log_metrics('''eval''' , lowerCamelCase )
trainer.save_metrics('''eval''' , lowerCamelCase )
return results
if __name__ == "__main__":
main() | 703 |
"""simple docstring"""
import unittest
import numpy as np
import torch
from .utils_summarization import build_mask, compute_token_type_ids, process_story, truncate_or_pad
class _UpperCamelCase ( unittest.TestCase ):
"""simple docstring"""
def _UpperCAmelCase ( self : Optional[int] ) -> Union[str, Any]:
'''simple docstring'''
__magic_name__ : Dict = 10
def _UpperCAmelCase ( self : Optional[Any] ) -> List[str]:
'''simple docstring'''
__magic_name__ : Optional[int] = [1, 2, 3, 4]
__magic_name__ : Optional[Any] = [1, 2, 3, 4, 0, 0, 0, 0, 0, 0]
self.assertEqual(truncate_or_pad(snake_case , self.block_size , 0 ) , snake_case )
def _UpperCAmelCase ( self : str ) -> Union[str, Any]:
'''simple docstring'''
__magic_name__ : Optional[int] = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
__magic_name__ : str = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
self.assertEqual(truncate_or_pad(snake_case , self.block_size , 0 ) , snake_case )
def _UpperCAmelCase ( self : Optional[Any] ) -> List[str]:
'''simple docstring'''
__magic_name__ : Tuple = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13]
__magic_name__ : int = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
self.assertEqual(truncate_or_pad(snake_case , self.block_size , 0 ) , snake_case )
def _UpperCAmelCase ( self : int ) -> List[str]:
'''simple docstring'''
__magic_name__ : List[str] = '''It was the year of Our Lord one thousand seven hundred and
seventy-five.\n\nSpiritual revelations were conceded to England at that
favoured period, as at this.'''
__magic_name__ , __magic_name__ : Optional[Any] = process_story(snake_case )
self.assertEqual(snake_case , [] )
def _UpperCAmelCase ( self : Dict ) -> str:
'''simple docstring'''
__magic_name__ : List[str] = ''''''
__magic_name__ , __magic_name__ : Optional[int] = process_story(snake_case )
self.assertEqual(snake_case , [] )
self.assertEqual(snake_case , [] )
def _UpperCAmelCase ( self : List[Any] ) -> str:
'''simple docstring'''
__magic_name__ : Optional[Any] = (
'''It was the year of Our Lord one thousand seven hundred and '''
'''seventy-five\n\nSpiritual revelations were conceded to England '''
'''at that favoured period, as at this.\n@highlight\n\nIt was the best of times'''
)
__magic_name__ , __magic_name__ : Union[str, Any] = process_story(snake_case )
__magic_name__ : int = [
'''It was the year of Our Lord one thousand seven hundred and seventy-five.''',
'''Spiritual revelations were conceded to England at that favoured period, as at this.''',
]
self.assertEqual(snake_case , snake_case )
__magic_name__ : Tuple = ['''It was the best of times.''']
self.assertEqual(snake_case , snake_case )
def _UpperCAmelCase ( self : List[Any] ) -> Tuple:
'''simple docstring'''
__magic_name__ : Optional[int] = torch.tensor([1, 2, 3, 4] )
__magic_name__ : Dict = torch.tensor([1, 1, 1, 1] )
np.testing.assert_array_equal(build_mask(snake_case , 0 ).numpy() , expected.numpy() )
def _UpperCAmelCase ( self : Any ) -> Dict:
'''simple docstring'''
__magic_name__ : Any = torch.tensor([1, 2, 3, 4, 23, 23, 23] )
__magic_name__ : Optional[int] = torch.tensor([1, 1, 1, 1, 0, 0, 0] )
np.testing.assert_array_equal(build_mask(snake_case , 23 ).numpy() , expected.numpy() )
def _UpperCAmelCase ( self : Tuple ) -> Optional[int]:
'''simple docstring'''
__magic_name__ : List[Any] = torch.tensor([8, 2, 3, 4, 1, 1, 1] )
__magic_name__ : Dict = torch.tensor([1, 1, 1, 1, 0, 0, 0] )
np.testing.assert_array_equal(build_mask(snake_case , 1 ).numpy() , expected.numpy() )
def _UpperCAmelCase ( self : Optional[Any] ) -> Dict:
'''simple docstring'''
__magic_name__ : List[str] = 101
__magic_name__ : Union[str, Any] = torch.tensor([[1, 2, 3, 4, 5, 6], [1, 2, 3, 101, 5, 6], [1, 101, 3, 4, 101, 6]] )
__magic_name__ : Optional[int] = torch.tensor([[1, 1, 1, 1, 1, 1], [1, 1, 1, 0, 0, 0], [1, 0, 0, 0, 1, 1]] )
__magic_name__ : List[str] = compute_token_type_ids(snake_case , snake_case )
np.testing.assert_array_equal(snake_case , snake_case )
| 147 | 0 |
"""simple docstring"""
from collections import defaultdict
from graphs.minimum_spanning_tree_prims import prisms_algorithm as mst
def lowerCamelCase__ ( ) -> Any:
"""simple docstring"""
_UpperCamelCase , _UpperCamelCase = 9, 14 # noqa: F841
_UpperCamelCase = [
[0, 1, 4],
[0, 7, 8],
[1, 2, 8],
[7, 8, 7],
[7, 6, 1],
[2, 8, 2],
[8, 6, 6],
[2, 3, 7],
[2, 5, 4],
[6, 5, 2],
[3, 5, 14],
[3, 4, 9],
[5, 4, 10],
[1, 7, 11],
]
_UpperCamelCase = defaultdict(__snake_case )
for nodea, nodea, cost in edges:
adjancency[nodea].append([nodea, cost] )
adjancency[nodea].append([nodea, cost] )
_UpperCamelCase = mst(__snake_case )
_UpperCamelCase = [
[7, 6, 1],
[2, 8, 2],
[6, 5, 2],
[0, 1, 4],
[2, 5, 4],
[2, 3, 7],
[0, 7, 8],
[3, 4, 9],
]
for answer in expected:
_UpperCamelCase = tuple(answer[:2] )
_UpperCamelCase = tuple(edge[::-1] )
assert edge in result or reverse in result
| 19 |
'''simple docstring'''
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_squeezebert import SqueezeBertTokenizer
UpperCamelCase : List[str] = logging.get_logger(__name__)
UpperCamelCase : Dict = {'vocab_file': 'vocab.txt', 'tokenizer_file': 'tokenizer.json'}
UpperCamelCase : int = {
'vocab_file': {
'squeezebert/squeezebert-uncased': (
'https://huggingface.co/squeezebert/squeezebert-uncased/resolve/main/vocab.txt'
),
'squeezebert/squeezebert-mnli': 'https://huggingface.co/squeezebert/squeezebert-mnli/resolve/main/vocab.txt',
'squeezebert/squeezebert-mnli-headless': (
'https://huggingface.co/squeezebert/squeezebert-mnli-headless/resolve/main/vocab.txt'
),
},
'tokenizer_file': {
'squeezebert/squeezebert-uncased': (
'https://huggingface.co/squeezebert/squeezebert-uncased/resolve/main/tokenizer.json'
),
'squeezebert/squeezebert-mnli': (
'https://huggingface.co/squeezebert/squeezebert-mnli/resolve/main/tokenizer.json'
),
'squeezebert/squeezebert-mnli-headless': (
'https://huggingface.co/squeezebert/squeezebert-mnli-headless/resolve/main/tokenizer.json'
),
},
}
UpperCamelCase : Tuple = {
'squeezebert/squeezebert-uncased': 5_12,
'squeezebert/squeezebert-mnli': 5_12,
'squeezebert/squeezebert-mnli-headless': 5_12,
}
UpperCamelCase : Dict = {
'squeezebert/squeezebert-uncased': {'do_lower_case': True},
'squeezebert/squeezebert-mnli': {'do_lower_case': True},
'squeezebert/squeezebert-mnli-headless': {'do_lower_case': True},
}
class UpperCamelCase__ (a ):
'''simple docstring'''
_UpperCamelCase = VOCAB_FILES_NAMES
_UpperCamelCase = PRETRAINED_VOCAB_FILES_MAP
_UpperCamelCase = PRETRAINED_INIT_CONFIGURATION
_UpperCamelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_UpperCamelCase = SqueezeBertTokenizer
def __init__( self ,_lowerCAmelCase=None ,_lowerCAmelCase=None ,_lowerCAmelCase=True ,_lowerCAmelCase="[UNK]" ,_lowerCAmelCase="[SEP]" ,_lowerCAmelCase="[PAD]" ,_lowerCAmelCase="[CLS]" ,_lowerCAmelCase="[MASK]" ,_lowerCAmelCase=True ,_lowerCAmelCase=None ,**_lowerCAmelCase ,):
super().__init__(
_lowerCAmelCase ,tokenizer_file=_lowerCAmelCase ,do_lower_case=_lowerCAmelCase ,unk_token=_lowerCAmelCase ,sep_token=_lowerCAmelCase ,pad_token=_lowerCAmelCase ,cls_token=_lowerCAmelCase ,mask_token=_lowerCAmelCase ,tokenize_chinese_chars=_lowerCAmelCase ,strip_accents=_lowerCAmelCase ,**_lowerCAmelCase ,)
lowerCamelCase__ = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
if (
normalizer_state.get("""lowercase""" ,_lowerCAmelCase ) != do_lower_case
or normalizer_state.get("""strip_accents""" ,_lowerCAmelCase ) != strip_accents
or normalizer_state.get("""handle_chinese_chars""" ,_lowerCAmelCase ) != tokenize_chinese_chars
):
lowerCamelCase__ = getattr(_lowerCAmelCase ,normalizer_state.pop("""type""" ) )
lowerCamelCase__ = do_lower_case
lowerCamelCase__ = strip_accents
lowerCamelCase__ = tokenize_chinese_chars
lowerCamelCase__ = normalizer_class(**_lowerCAmelCase )
lowerCamelCase__ = do_lower_case
def UpperCamelCase_ ( self ,_lowerCAmelCase ,_lowerCAmelCase=None ):
lowerCamelCase__ = [self.cls_token_id] + token_ids_a + [self.sep_token_id]
if token_ids_a:
output += token_ids_a + [self.sep_token_id]
return output
def UpperCamelCase_ ( self ,_lowerCAmelCase ,_lowerCAmelCase = None ):
lowerCamelCase__ = [self.sep_token_id]
lowerCamelCase__ = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def UpperCamelCase_ ( self ,_lowerCAmelCase ,_lowerCAmelCase = None ):
lowerCamelCase__ = self._tokenizer.model.save(_lowerCAmelCase ,name=_lowerCAmelCase )
return tuple(_lowerCAmelCase )
| 50 | 0 |
import os
import pytest
from datasets import (
get_dataset_config_info,
get_dataset_config_names,
get_dataset_infos,
get_dataset_split_names,
inspect_dataset,
inspect_metric,
)
a_ = pytest.mark.integration
@pytest.mark.parametrize("path" , ["paws", "csv"] )
def __lowerCAmelCase ( A_ : List[str] , A_ : str ) -> Optional[Any]:
inspect_dataset(A_ , A_ )
__UpperCAmelCase = path + """.py"""
assert script_name in os.listdir(A_ )
assert "__pycache__" not in os.listdir(A_ )
@pytest.mark.filterwarnings("ignore:inspect_metric is deprecated:FutureWarning" )
@pytest.mark.filterwarnings("ignore:metric_module_factory is deprecated:FutureWarning" )
@pytest.mark.parametrize("path" , ["accuracy"] )
def __lowerCAmelCase ( A_ : int , A_ : str ) -> Any:
inspect_metric(A_ , A_ )
__UpperCAmelCase = path + """.py"""
assert script_name in os.listdir(A_ )
assert "__pycache__" not in os.listdir(A_ )
@pytest.mark.parametrize(
"path, config_name, expected_splits" , [
("squad", "plain_text", ["train", "validation"]),
("dalle-mini/wit", "dalle-mini--wit", ["train"]),
("paws", "labeled_final", ["train", "test", "validation"]),
] , )
def __lowerCAmelCase ( A_ : Optional[int] , A_ : List[Any] , A_ : str ) -> str:
__UpperCAmelCase = get_dataset_config_info(A_ , config_name=A_ )
assert info.config_name == config_name
assert list(info.splits.keys() ) == expected_splits
@pytest.mark.parametrize(
"path, config_name, expected_exception" , [
("paws", None, ValueError),
] , )
def test_get_dataset_config_info_error(path, config_name, expected_exception):
    with pytest.raises(expected_exception):
        get_dataset_config_info(path, config_name=config_name)
@pytest.mark.parametrize(
"path, expected" , [
("squad", "plain_text"),
("acronym_identification", "default"),
("lhoestq/squad", "plain_text"),
("lhoestq/test", "default"),
("lhoestq/demo1", "lhoestq--demo1"),
("dalle-mini/wit", "dalle-mini--wit"),
] , )
def test_get_dataset_config_names(path, expected):
    config_names = get_dataset_config_names(path)
    assert expected in config_names
@pytest.mark.parametrize(
"path, expected_configs, expected_splits_in_first_config" , [
("squad", ["plain_text"], ["train", "validation"]),
("dalle-mini/wit", ["dalle-mini--wit"], ["train"]),
("paws", ["labeled_final", "labeled_swap", "unlabeled_final"], ["train", "test", "validation"]),
] , )
def test_get_dataset_infos(path, expected_configs, expected_splits_in_first_config):
    infos = get_dataset_infos(path)
    assert list(infos.keys()) == expected_configs
    expected_config = expected_configs[0]
    assert expected_config in infos
    info = infos[expected_config]
    assert info.config_name == expected_config
    assert list(info.splits.keys()) == expected_splits_in_first_config
@pytest.mark.parametrize(
"path, expected_config, expected_splits" , [
("squad", "plain_text", ["train", "validation"]),
("dalle-mini/wit", "dalle-mini--wit", ["train"]),
("paws", "labeled_final", ["train", "test", "validation"]),
] , )
def test_get_dataset_info_default_config(path, expected_config, expected_splits):
    infos = get_dataset_infos(path)
    assert expected_config in infos
    info = infos[expected_config]
    assert info.config_name == expected_config
    assert list(info.splits.keys()) == expected_splits
@pytest.mark.parametrize(
"path, config_name, expected_exception" , [
("paws", None, ValueError),
] , )
def test_get_dataset_split_names_error(path, config_name, expected_exception):
    with pytest.raises(expected_exception):
        get_dataset_split_names(path, config_name=config_name)
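# Usage sketch (assumes network access to the Hugging Face Hub; not part of the
# original test module):
#
#   from datasets import get_dataset_split_names
#   get_dataset_split_names("squad", config_name="plain_text")
#   # -> ["train", "validation"], matching the parametrized expectations above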
| 714 | from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

VIT_MAE_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/vit-mae-base": "https://huggingface.co/facebook/vit-mae-base/resolve/main/config.json",
    # See all ViT MAE models at https://huggingface.co/models?filter=vit-mae
}


class ViTMAEConfig(PretrainedConfig):
    model_type = "vit_mae"
    def __init__(
        self, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072,
        hidden_act="gelu", hidden_dropout_prob=0.0, attention_probs_dropout_prob=0.0, initializer_range=0.02,
        layer_norm_eps=1e-12, image_size=224, patch_size=16, num_channels=3, qkv_bias=True,
        decoder_num_attention_heads=16, decoder_hidden_size=512, decoder_num_hidden_layers=8,
        decoder_intermediate_size=2048, mask_ratio=0.75, norm_pix_loss=False, **kwargs,
    ):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias
        self.decoder_num_attention_heads = decoder_num_attention_heads
        self.decoder_hidden_size = decoder_hidden_size
        self.decoder_num_hidden_layers = decoder_num_hidden_layers
        self.decoder_intermediate_size = decoder_intermediate_size
        self.mask_ratio = mask_ratio
        self.norm_pix_loss = norm_pix_loss
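# Usage sketch: the config mirrors ViT for the encoder and adds decoder_* fields
# plus the masking ratio used for MAE pre-training.
#
#   config = ViTMAEConfig(mask_ratio=0.9)
#   config.decoder_hidden_size   # 512 by default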
| 286 | 0 |
'''simple docstring'''
import argparse
import random
import joblib
import numpy as np
import torch
from igf.igf import (
    SecondaryLearner,
    collect_objective_set,
    compute_perplexity,
    generate_datasets,
    load_gpt2,
    recopy_gpt2,
    set_seed,
    train_secondary_learner,
)
from torch.utils.data import DataLoader, RandomSampler
from transformers import GPT2LMHeadModel
def generate_n_pairs(
    context_len=32, max_steps=10, size_objective_set=100, min_len=1026, trim=True,
    data_file="data/tokenized_stories_train_wikitext103.jbl", igf_data_file="igf_context_pairs.jbl",
):
    set_seed(3)
    # generate train_data and objective_set
    train_data, objective_set = generate_datasets(
        context_len, data_file, number=size_objective_set, min_len=1026, trim=trim
    )
    # keeps model same across runs
    set_seed(4)
    # model, lm_optimizer, lm_scheduler = recopy_gpt2(model, device, max_steps) # store original model weights
    # can we train on GPU?
    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
    # load pretrained model
    model = load_gpt2("gpt2").to(device)
    print("computing perplexity on objective set")
    orig_perp = compute_perplexity(model, objective_set, context_len).item()
    print("perplexity on objective set:", orig_perp)
    # collect igf pairs and save to file demo.jbl
    collect_objective_set(model, orig_perp, context_len, train_data, objective_set, max_steps, device, igf_data_file)
    # clean up, delete model and data we don't need anymore
    del model, train_data, objective_set
    torch.cuda.empty_cache()
def training_secondary_learner(
    secondary_learner_train_data, secondary_learner_max_epochs=15, secondary_learner_batch_size=128,
    eval_freq=100, igf_model_path="igf_model.pt",
):
    set_seed(42)
    # Load pre-trained model
    model = GPT2LMHeadModel.from_pretrained("gpt2")
    # Initialize secondary learner to use embedding weights of model
    secondary_learner = SecondaryLearner(model)
    # Train secondary learner
    secondary_learner = train_secondary_learner(
        secondary_learner, secondary_learner_train_data, max_epochs=secondary_learner_max_epochs,
        batch_size=secondary_learner_batch_size, eval_freq=100, igf_model_path=igf_model_path,
    )
    del model, secondary_learner_train_data
    torch.cuda.empty_cache()
    return secondary_learner
def finetune(
    model, train_dataset, test_dataset, context_len=32, max_steps=1000, batch_size=16, threshold=1.0,
    recopy_model=recopy_gpt2, secondary_learner=None, eval_interval=10, finetuned_model_name="gpt2_finetuned.pt",
):
    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
    train_sampler = RandomSampler(train_dataset)
    train_dataloader = DataLoader(train_dataset, sampler=train_sampler)
    num_train_epochs = max_steps // (len(train_dataset)) + 1
    global_step = 0
    context = torch.zeros((1, context_len), dtype=torch.long, device=device)
    model, lm_optimizer, lm_scheduler = recopy_model(model, device, max_steps)

    model.train()
    if secondary_learner is not None:
        secondary_learner.to(device)
        secondary_learner.eval()
    contexts = []
    examples = 0

    observed_qs = []
    test_perps = []

    # Compute the performance of the transformer model at the beginning
    real_perp = compute_perplexity(model, test_dataset, context_len)
    test_perps.append(real_perp)
    print("Test perplexity, step", global_step, ":", real_perp)
    for epoch in range(int(num_train_epochs)):
        for step, example in enumerate(train_dataloader):
            torch.cuda.empty_cache()
            start = random.randint(0, example.size(2) - context_len - 1)
            context = example[0, 0, start : start + context_len]
            lm_optimizer.zero_grad()
            outputs = model(context, labels=context)
            do_backprop = True

            if secondary_learner is not None:
                predicted_q = secondary_learner.forward(
                    torch.tensor(context, dtype=torch.long, device=device).unsqueeze(0)
                )[0].item()
                observed_qs.append(float(predicted_q))
                # Here we implement the simple non-constant threshold for the predicted IG(X) value
                # We will decay the selectivity of our secondary learner filter from
                # 1 standard deviation above average to 1 below average after 10 batches.
                if global_step == 10:
                    threshold = -1
                if predicted_q < threshold:
                    do_backprop = False

            # If we passed the filter, add the context to the batch!
            if do_backprop:
                contexts.append(np.array(context.cpu()))
                lm_loss = outputs[0]
                lm_loss.backward()
                examples += 1

            del outputs

            # Once the batch is filled with enough contexts, backprop on the batch.
            if examples == batch_size:
                torch.cuda.empty_cache()
                examples = 0
                # Do LM backprop
                torch.nn.utils.clip_grad_norm_(model.parameters(), 3.0)
                lm_optimizer.step()
                lm_scheduler.step()  # Update learning rate schedule
                global_step += 1
                # Compute the performance of the transformer model at this batch
                if global_step % eval_interval == 0:
                    real_perp = compute_perplexity(model, test_dataset, context_len)
                    test_perps.append(real_perp)
                    print("Test perplexity, step", global_step, ":", real_perp)
            # Break out of the loop after 60 batches
            if max_steps > 0 and global_step > 60:
                break
        if max_steps > 0 and global_step > 60:
            break

    # save finetuned transformer model
    torch.save(model.state_dict(), finetuned_model_name)
    torch.cuda.empty_cache()
    # Do some cleaning up so we can reinitialize for the next run of this function
    del lm_optimizer
    del lm_scheduler
    return model
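# For reference: "perplexity" above is exp(mean cross-entropy). A minimal
# standalone sketch (an illustration only, not the repo's compute_perplexity
# helper, which also handles chunking over long sequences):
def _perplexity_of(model, input_ids):
    import math

    # Teacher-forced LM loss; the model shifts the labels internally.
    with torch.no_grad():
        loss = model(input_ids.unsqueeze(0), labels=input_ids.unsqueeze(0))[0]
    return math.exp(loss.item())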
def main():
    parser = argparse.ArgumentParser(description="Fine-tune a transformer model with IGF on a language modeling task")

    # Required parameters
    parser.add_argument(
        "--data_dir", default=None, type=str, required=True,
        help="The input data dir. Should contain data files for WikiText.",
    )
    parser.add_argument(
        "--model_name_or_path", default=None, type=str, required=True,
        help="Path to pretrained model or model identifier from huggingface.co/models",
    )
    parser.add_argument(
        "--data_file", type=str, default=None,
        help=(
            "A jbl file containing tokenized data which can be split as objective dataset, "
            "train_dataset and test_dataset."
        ),
    )
    parser.add_argument(
        "--igf_data_file", type=str, default=None,
        help="A jbl file containing the context and information gain pairs to train secondary learner.",
    )
    parser.add_argument(
        "--output_dir", default=None, type=str, required=True,
        help="The output directory where the final fine-tuned model is stored.",
    )
    parser.add_argument(
        "--tokenizer_name", default=None, type=str,
        help="Pretrained tokenizer name or path if not the same as model_name",
    )
    parser.add_argument("--seed", type=int, default=None, help="A seed for reproducible training.")
    parser.add_argument(
        "--context_len", default=32, type=int,
        help=(
            "The maximum total input sequence length after tokenization. Sequences longer "
            "than this will be truncated, sequences shorter will be padded."
        ),
    )
    parser.add_argument(
        "--size_objective_set", default=100, type=int,
        help="number of articles that are long enough to be used as our objective set",
    )
    parser.add_argument("--eval_freq", default=100, type=int, help="secondary model evaluation is triggered at eval_freq")
    parser.add_argument("--max_steps", default=1000, type=int, help="To calculate training epochs")
    parser.add_argument(
        "--secondary_learner_batch_size", default=128, type=int,
        help="batch size of training data for secondary learner",
    )
    parser.add_argument("--batch_size", default=16, type=int, help="batch size of training data of language model(gpt2) ")
    parser.add_argument(
        "--eval_interval", default=10, type=int,
        help=(
            "decay the selectivity of our secondary learner filter from "
            "1 standard deviation above average to 1 below average after 10 batches"
        ),
    )
    parser.add_argument("--number", default=100, type=int, help="The number of examples split to be used as objective_set/test_data")
    parser.add_argument("--min_len", default=1026, type=int, help="The minimum length of the article to be used as objective set")
    parser.add_argument("--secondary_learner_max_epochs", default=15, type=int, help="number of epochs to train secondary learner")
    parser.add_argument("--trim", default=True, type=bool, help="truncate the example if it exceeds context length")
    parser.add_argument(
        "--threshold", default=1.0, type=float,
        help=(
            "The threshold value used by secondary learner to filter the train_data and allow only "
            "informative data as input to the model"
        ),
    )
    parser.add_argument("--finetuned_model_name", default="gpt2_finetuned.pt", type=str, help="finetuned_model_name")
    parser.add_argument(
        "--recopy_model", default=recopy_gpt2, type=str,
        help="Reset the model to the original pretrained GPT-2 weights after each iteration",
    )

    # function calls
    # Collecting *n* pairs of context and information gain(X, IG(X)) for training the secondary learner
    generate_n_pairs(
        context_len=32, max_steps=10, size_objective_set=100, min_len=1026, trim=True,
        data_file="data/tokenized_stories_train_wikitext103.jbl", igf_data_file="igf_context_pairs.jbl",
    )

    # Load train data for secondary learner
    secondary_learner_train_data = joblib.load("data/IGF_values.jbl")

    # Train secondary learner
    secondary_learner = training_secondary_learner(
        secondary_learner_train_data, secondary_learner_max_epochs=15, secondary_learner_batch_size=128,
        eval_freq=100, igf_model_path="igf_model.pt",
    )

    # load pretrained gpt2 model
    model = GPT2LMHeadModel.from_pretrained("gpt2")
    set_seed(42)

    # Generate train and test data to train and evaluate gpt2 model
    train_dataset, test_dataset = generate_datasets(
        context_len=32, file="data/tokenized_stories_train_wikitext103.jbl", number=100, min_len=1026, trim=True
    )

    # fine-tuning of the gpt2 model using igf (Information Gain Filtration)
    finetune(
        model, train_dataset, test_dataset, context_len=32, max_steps=1000, batch_size=16, threshold=1.0,
        recopy_model=recopy_gpt2, secondary_learner=secondary_learner, eval_interval=10,
        finetuned_model_name="gpt2_finetuned.pt",
    )
if __name__ == "__main__":
main()
| 432 |
'''simple docstring'''
import functools
def mincost_tickets(days: list, costs: list) -> int:
    """
    Minimum total cost to cover all travel days with 1-day, 7-day and 30-day
    passes (costs[0], costs[1] and costs[2] respectively).
    """
    # Validation
    if not isinstance(days, list) or not all(isinstance(day, int) for day in days):
        raise ValueError("The parameter days should be a list of integers")

    if len(costs) != 3 or not all(isinstance(cost, int) for cost in costs):
        raise ValueError("The parameter costs should be a list of three integers")

    if len(days) == 0:
        return 0

    if min(days) <= 0:
        raise ValueError("All days elements should be greater than 0")

    if max(days) >= 366:
        raise ValueError("All days elements should be less than 366")

    days_set = set(days)

    @functools.cache
    def dynamic_programming(index: int) -> int:
        if index > 365:
            return 0

        if index not in days_set:
            return dynamic_programming(index + 1)

        return min(
            costs[0] + dynamic_programming(index + 1),
            costs[1] + dynamic_programming(index + 7),
            costs[2] + dynamic_programming(index + 30),
        )

    return dynamic_programming(1)
if __name__ == "__main__":
import doctest
doctest.testmod()
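# Worked example (a sketch): travel days [1, 4, 6, 7, 8, 20] with pass costs
# [2, 7, 15] for 1-day, 7-day and 30-day passes. Best plan: a 1-day pass for
# day 1, a 7-day pass covering days 4-8, and a 1-day pass for day 20,
# giving 2 + 7 + 2 = 11.
#
#   mincost_tickets([1, 4, 6, 7, 8, 20], [2, 7, 15])  # 11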
| 432 | 1 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
'''configuration_graphormer''': ['''GRAPHORMER_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''GraphormerConfig'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_graphormer"] = [
'''GRAPHORMER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''GraphormerForGraphClassification''',
'''GraphormerModel''',
'''GraphormerPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_graphormer import GRAPHORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, GraphormerConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_graphormer import (
GRAPHORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
GraphormerForGraphClassification,
GraphormerModel,
GraphormerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
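# Usage sketch: with the lazy module in place, importing the package stays cheap
# because torch-backed symbols are only resolved on first attribute access, e.g.
#
#   from transformers.models.graphormer import GraphormerConfig  # config only
#   from transformers.models.graphormer import GraphormerModel   # triggers the torch branch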
| 702 |
from __future__ import annotations
def depth_first_search(graph: dict, start: str) -> set:
    """Iterative depth-first search; returns the set of nodes reachable from `start`."""
    explored, stack = set(start), [start]

    while stack:
        v = stack.pop()
        explored.add(v)
        # Differences from BFS:
        # 1) pop last element instead of first one
        # 2) add adjacent elements to stack without exploring them
        for adj in reversed(graph[v]):
            if adj not in explored:
                stack.append(adj)
    return explored
G = {
'''A''': ['''B''', '''C''', '''D'''],
'''B''': ['''A''', '''D''', '''E'''],
'''C''': ['''A''', '''F'''],
'''D''': ['''B''', '''D'''],
'''E''': ['''B''', '''F'''],
'''F''': ['''C''', '''E''', '''G'''],
'''G''': ['''F'''],
}
if __name__ == "__main__":
import doctest
doctest.testmod()
print(depth_first_search(G, '''A'''))
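# Expected result (a sketch): every node is reachable from "A" in the graph above,
# so the call prints the full vertex set:
#
#   depth_first_search(G, "A") == {"A", "B", "C", "D", "E", "F", "G"}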
| 530 | 0 |
'''simple docstring'''
import os
import posixpath
import shutil
import uuid
from dataclasses import dataclass
from typing import TYPE_CHECKING, Iterable, List, Optional, Tuple, Union
import numpy as np
import pyarrow as pa
import datasets
from datasets.arrow_writer import ArrowWriter, ParquetWriter
from datasets.config import MAX_SHARD_SIZE
from datasets.filesystems import (
is_remote_filesystem,
rename,
)
from datasets.iterable_dataset import _BaseExamplesIterable
from datasets.utils.py_utils import convert_file_size_to_int
logger = datasets.utils.logging.get_logger(__name__)
if TYPE_CHECKING:
import pyspark
@dataclass
class SparkConfig(datasets.BuilderConfig):
    """BuilderConfig for the Spark builder."""

    features: Optional[datasets.Features] = None
def _generate_iterable_examples(df: "pyspark.sql.DataFrame", partition_order: List[int]):
    import pyspark

    def generate_fn():
        df_with_partition_id = df.select("*", pyspark.sql.functions.spark_partition_id().alias("part_id"))
        for partition_id in partition_order:
            partition_df = df_with_partition_id.select("*").where(f"part_id = {partition_id}").drop("part_id")
            rows = partition_df.collect()
            row_id = 0
            for row in rows:
                yield f"{partition_id}_{row_id}", row.asDict()
                row_id += 1

    return generate_fn
class SparkExamplesIterable(_BaseExamplesIterable):
    def __init__(self, df: "pyspark.sql.DataFrame", partition_order=None):
        self.df = df
        self.partition_order = partition_order or range(self.df.rdd.getNumPartitions())
        self.generate_examples_fn = _generate_iterable_examples(self.df, self.partition_order)

    def __iter__(self):
        yield from self.generate_examples_fn()

    def shuffle_data_sources(self, generator) -> "SparkExamplesIterable":
        partition_order = list(range(self.df.rdd.getNumPartitions()))
        generator.shuffle(partition_order)
        return SparkExamplesIterable(self.df, partition_order=partition_order)

    def shard_data_sources(self, worker_id: int, num_workers: int) -> "SparkExamplesIterable":
        partition_order = self.split_shard_indices_by_worker(worker_id, num_workers)
        return SparkExamplesIterable(self.df, partition_order=partition_order)

    @property
    def n_shards(self) -> int:
        return len(self.partition_order)
class Spark(datasets.DatasetBuilder):
    BUILDER_CONFIG_CLASS = SparkConfig

    def __init__(
        self,
        df: "pyspark.sql.DataFrame",
        cache_dir: str = None,
        working_dir: str = None,
        **config_kwargs,
    ):
        import pyspark

        self._spark = pyspark.sql.SparkSession.builder.getOrCreate()
        self.df = df
        self._working_dir = working_dir
        super().__init__(
            cache_dir=cache_dir,
            config_name=str(self.df.semanticHash()),
            **config_kwargs,
        )
    def _validate_cache_dir(self):
        def create_cache_and_write_probe(context):
            # makedirs with exist_ok will recursively create the directory. It will not throw an error if directories
            # already exist.
            os.makedirs(self._cache_dir, exist_ok=True)
            probe_file = os.path.join(self._cache_dir, "fs_test" + uuid.uuid4().hex)
            # Opening the file in append mode will create a new file unless it already exists, in which case it will not
            # change the file contents.
            open(probe_file, "a")
            return [probe_file]

        if self._spark.conf.get("spark.master", "").startswith("local"):
            return

        # If the cluster is multi-node, make sure that the user provided a cache_dir and that it is on an NFS
        # accessible to the driver.
        # TODO: Stream batches to the driver using ArrowCollectSerializer instead of throwing an error.
        if self._cache_dir:
            probe = (
                self._spark.sparkContext.parallelize(range(1), 1).mapPartitions(create_cache_and_write_probe).collect()
            )
            if os.path.isfile(probe[0]):
                return

        raise ValueError(
            "When using Dataset.from_spark on a multi-node cluster, the driver and all workers should be able to access cache_dir"
        )
    def _info(self):
        return datasets.DatasetInfo(features=self.config.features)

    def _split_generators(self, dl_manager):
        return [datasets.SplitGenerator(name=datasets.Split.TRAIN)]
    def _repartition_df_if_needed(self, max_shard_size):
        import pyspark

        def get_arrow_batch_size(it):
            for batch in it:
                yield pa.RecordBatch.from_pydict({"batch_bytes": [batch.nbytes]})

        df_num_rows = self.df.count()
        sample_num_rows = df_num_rows if df_num_rows <= 100 else 100
        # Approximate the size of each row (in Arrow format) by averaging over a max-100-row sample.
        approx_bytes_per_row = (
            self.df.limit(sample_num_rows)
            .repartition(1)
            .mapInArrow(get_arrow_batch_size, "batch_bytes: long")
            .agg(pyspark.sql.functions.sum("batch_bytes").alias("sample_bytes"))
            .collect()[0]
            .sample_bytes
            / sample_num_rows
        )
        approx_total_size = approx_bytes_per_row * df_num_rows
        if approx_total_size > max_shard_size:
            # Make sure there is at least one row per partition.
            new_num_partitions = min(df_num_rows, int(approx_total_size / max_shard_size))
            self.df = self.df.repartition(new_num_partitions)
    def _prepare_split_single(self, fpath: str, file_format: str, max_shard_size: int):
        import pyspark

        writer_class = ParquetWriter if file_format == "parquet" else ArrowWriter
        working_fpath = os.path.join(self._working_dir, os.path.basename(fpath)) if self._working_dir else fpath
        embed_local_files = file_format == "parquet"

        # Define these so that we don't reference self in write_arrow, which will result in a pickling error due to
        # pickling the SparkContext.
        features = self.config.features
        writer_batch_size = self._writer_batch_size
        storage_options = self._fs.storage_options

        def write_arrow(it):
            # Within the same SparkContext, no two task attempts will share the same attempt ID.
            task_id = pyspark.TaskContext().taskAttemptId()
            first_batch = next(it, None)
            if first_batch is None:
                # Some partitions might not receive any data.
                return pa.RecordBatch.from_arrays(
                    [[task_id], [0], [0]], names=["task_id", "num_examples", "num_bytes"],
                )
            shard_id = 0
            writer = writer_class(
                features=features,
                path=working_fpath.replace("SSSSS", f"{shard_id:05d}").replace("TTTTT", f"{task_id:05d}"),
                writer_batch_size=writer_batch_size, storage_options=storage_options,
                embed_local_files=embed_local_files,
            )
            table = pa.Table.from_batches([first_batch])
            writer.write_table(table)
            for batch in it:
                if max_shard_size is not None and writer._num_bytes >= max_shard_size:
                    num_examples, num_bytes = writer.finalize()
                    writer.close()
                    yield pa.RecordBatch.from_arrays(
                        [[task_id], [num_examples], [num_bytes]], names=["task_id", "num_examples", "num_bytes"],
                    )
                    shard_id += 1
                    writer = writer_class(
                        features=writer._features,
                        path=working_fpath.replace("SSSSS", f"{shard_id:05d}").replace("TTTTT", f"{task_id:05d}"),
                        writer_batch_size=writer_batch_size, storage_options=storage_options,
                        embed_local_files=embed_local_files,
                    )
                table = pa.Table.from_batches([batch])
                writer.write_table(table)
            if writer._num_bytes > 0:
                num_examples, num_bytes = writer.finalize()
                writer.close()
                yield pa.RecordBatch.from_arrays(
                    [[task_id], [num_examples], [num_bytes]], names=["task_id", "num_examples", "num_bytes"],
                )
            if working_fpath != fpath:
                for file in os.listdir(os.path.dirname(working_fpath)):
                    dest = os.path.join(os.path.dirname(fpath), os.path.basename(file))
                    shutil.move(file, dest)

        stats = (
            self.df.mapInArrow(write_arrow, "task_id: long, num_examples: long, num_bytes: long")
            .groupBy("task_id")
            .agg(
                pyspark.sql.functions.sum("num_examples").alias("total_num_examples"),
                pyspark.sql.functions.sum("num_bytes").alias("total_num_bytes"),
                pyspark.sql.functions.count("num_bytes").alias("num_shards"),
                pyspark.sql.functions.collect_list("num_examples").alias("shard_lengths"),
            )
            .collect()
        )
        for row in stats:
            yield row.task_id, (row.total_num_examples, row.total_num_bytes, row.num_shards, row.shard_lengths)
    def _prepare_split(self, split_generator, file_format="arrow", max_shard_size=None, num_proc=None, **kwargs):
        self._validate_cache_dir()

        max_shard_size = convert_file_size_to_int(max_shard_size or MAX_SHARD_SIZE)
        self._repartition_df_if_needed(max_shard_size)
        is_local = not is_remote_filesystem(self._fs)
        path_join = os.path.join if is_local else posixpath.join

        SUFFIX = "-TTTTT-SSSSS-of-NNNNN"
        fname = f"{self.name}-{split_generator.name}{SUFFIX}.{file_format}"
        fpath = path_join(self._output_dir, fname)

        total_num_examples = 0
        total_num_bytes = 0
        total_shards = 0
        task_id_and_num_shards = []
        all_shard_lengths = []

        for task_id, content in self._prepare_split_single(fpath, file_format, max_shard_size):
            (num_examples, num_bytes, num_shards, shard_lengths) = content
            if num_bytes > 0:
                total_num_examples += num_examples
                total_num_bytes += num_bytes
                total_shards += num_shards
                task_id_and_num_shards.append((task_id, num_shards))
                all_shard_lengths.extend(shard_lengths)

        split_generator.split_info.num_examples = total_num_examples
        split_generator.split_info.num_bytes = total_num_bytes

        # should rename everything at the end
        logger.debug(f"Renaming {total_shards} shards.")
        if total_shards > 1:
            split_generator.split_info.shard_lengths = all_shard_lengths

            # Define fs outside of _rename_shard so that we don't reference self in the function, which will result in a
            # pickling error due to pickling the SparkContext.
            fs = self._fs

            # use the -SSSSS-of-NNNNN pattern
            def _rename_shard(task_id: int, shard_id: int, global_shard_id: int):
                rename(
                    fs,
                    fpath.replace("SSSSS", f"{shard_id:05d}").replace("TTTTT", f"{task_id:05d}"),
                    fpath.replace("TTTTT-SSSSS", f"{global_shard_id:05d}").replace("NNNNN", f"{total_shards:05d}"),
                )

            args = []
            global_shard_id = 0
            for i in range(len(task_id_and_num_shards)):
                task_id, num_shards = task_id_and_num_shards[i]
                for shard_id in range(num_shards):
                    args.append([task_id, shard_id, global_shard_id])
                    global_shard_id += 1
            self._spark.sparkContext.parallelize(args, len(args)).map(lambda args: _rename_shard(*args)).collect()
        else:
            # don't use any pattern
            shard_id = 0
            task_id = task_id_and_num_shards[0][0]
            self._rename(
                fpath.replace("SSSSS", f"{shard_id:05d}").replace("TTTTT", f"{task_id:05d}"),
                fpath.replace(SUFFIX, ""),
            )
    def _get_examples_iterable_for_split(self, split_generator) -> SparkExamplesIterable:
        return SparkExamplesIterable(self.df)
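# Usage sketch (assumes a local SparkSession; `Dataset.from_spark` is the public
# entry point that drives the builder above):
#
#   from pyspark.sql import SparkSession
#   from datasets import Dataset
#
#   spark = SparkSession.builder.master("local[2]").getOrCreate()
#   df = spark.createDataFrame([("a", 0), ("b", 1)], ["text", "label"])
#   ds = Dataset.from_spark(df)  # materializes the dataframe into an Arrow-backed Dataset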
| 689 |
'''simple docstring'''
from scipy.stats import spearmanr
import datasets
_DESCRIPTION = """
The Spearman rank-order correlation coefficient is a measure of the
relationship between two datasets. Like other correlation coefficients,
this one varies between -1 and +1 with 0 implying no correlation.
Positive correlations imply that as data in dataset x increases, so
does data in dataset y. Negative correlations imply that as x increases,
y decreases. Correlations of -1 or +1 imply an exact monotonic relationship.
Unlike the Pearson correlation, the Spearman correlation does not
assume that both datasets are normally distributed.
The p-value roughly indicates the probability of an uncorrelated system
producing datasets that have a Spearman correlation at least as extreme
as the one computed from these datasets. The p-values are not entirely
reliable but are probably reasonable for datasets larger than 500 or so.
"""
_KWARGS_DESCRIPTION = """
Args:
predictions (`List[float]`): Predicted labels, as returned by a model.
references (`List[float]`): Ground truth labels.
return_pvalue (`bool`): If `True`, returns the p-value. If `False`, returns
only the spearmanr score. Defaults to `False`.
Returns:
spearmanr (`float`): Spearman correlation coefficient.
p-value (`float`): p-value. **Note**: is only returned if `return_pvalue=True` is input.
Examples:
Example 1:
>>> spearmanr_metric = datasets.load_metric(\"spearmanr\")
>>> results = spearmanr_metric.compute(references=[1, 2, 3, 4, 5], predictions=[10, 9, 2.5, 6, 4])
>>> print(results)
{'spearmanr': -0.7}
Example 2:
>>> spearmanr_metric = datasets.load_metric(\"spearmanr\")
>>> results = spearmanr_metric.compute(references=[1, 2, 3, 4, 5],
... predictions=[10, 9, 2.5, 6, 4],
... return_pvalue=True)
>>> print(results['spearmanr'])
-0.7
>>> print(round(results['spearmanr_pvalue'], 2))
0.19
"""
_CITATION = r"""\
@book{kokoska2000crc,
title={CRC standard probability and statistics tables and formulae},
author={Kokoska, Stephen and Zwillinger, Daniel},
year={2000},
publisher={Crc Press}
}
@article{2020SciPy-NMeth,
author = {Virtanen, Pauli and Gommers, Ralf and Oliphant, Travis E. and
Haberland, Matt and Reddy, Tyler and Cournapeau, David and
Burovski, Evgeni and Peterson, Pearu and Weckesser, Warren and
Bright, Jonathan and {van der Walt}, St{\'e}fan J. and
Brett, Matthew and Wilson, Joshua and Millman, K. Jarrod and
Mayorov, Nikolay and Nelson, Andrew R. J. and Jones, Eric and
Kern, Robert and Larson, Eric and Carey, C J and
Polat, {\.I}lhan and Feng, Yu and Moore, Eric W. and
{VanderPlas}, Jake and Laxalde, Denis and Perktold, Josef and
Cimrman, Robert and Henriksen, Ian and Quintero, E. A. and
Harris, Charles R. and Archibald, Anne M. and
Ribeiro, Ant{\^o}nio H. and Pedregosa, Fabian and
{van Mulbregt}, Paul and {SciPy 1.0 Contributors}},
title = {{{SciPy} 1.0: Fundamental Algorithms for Scientific
Computing in Python}},
journal = {Nature Methods},
year = {2020},
volume = {17},
pages = {261--272},
adsurl = {https://rdcu.be/b08Wh},
doi = {10.1038/s41592-019-0686-2},
}
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class Spearmanr(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("float"),
                    "references": datasets.Value("float"),
                }
            ),
            reference_urls=["https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.spearmanr.html"],
        )

    def _compute(self, predictions, references, return_pvalue=False):
        results = spearmanr(references, predictions)
        if return_pvalue:
            return {"spearmanr": results[0], "spearmanr_pvalue": results[1]}
        else:
            return {"spearmanr": results[0]}
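# Cross-check (a sketch): Spearman's rho is the Pearson correlation of the
# rank-transformed data, so the docstring example can be reproduced with:
#
#   from scipy.stats import pearsonr, rankdata
#   x, y = [1, 2, 3, 4, 5], [10, 9, 2.5, 6, 4]
#   pearsonr(rankdata(x), rankdata(y))[0]   # -0.7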
| 689 | 1 |
'''simple docstring'''
import copy
from typing import Dict, Optional
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
from ..detr import DetrConfig
from ..swin import SwinConfig
MASKFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/maskformer-swin-base-ade": (
        "https://huggingface.co/facebook/maskformer-swin-base-ade/blob/main/config.json"
    )
    # See all MaskFormer models at https://huggingface.co/models?filter=maskformer
}

logger = logging.get_logger(__name__)
class MaskFormerConfig(PretrainedConfig):
    model_type = "maskformer"
    attribute_map = {"hidden_size": "mask_feature_size"}
    backbones_supported = ["resnet", "swin"]
    decoders_supported = ["detr"]
    def __init__(
        self, fpn_feature_size: int = 256, mask_feature_size: int = 256, no_object_weight: float = 0.1,
        use_auxiliary_loss: bool = False, backbone_config: Optional[Dict] = None,
        decoder_config: Optional[Dict] = None, init_std: float = 0.02, init_xavier_std: float = 1.0,
        dice_weight: float = 1.0, cross_entropy_weight: float = 1.0, mask_weight: float = 20.0,
        output_auxiliary_logits: Optional[bool] = None, **kwargs,
    ):
        if backbone_config is None:
            # fall back to https://huggingface.co/microsoft/swin-base-patch4-window12-384-in22k
            backbone_config = SwinConfig(
                image_size=384, in_channels=3, patch_size=4, embed_dim=128, depths=[2, 2, 18, 2],
                num_heads=[4, 8, 16, 32], window_size=12, drop_path_rate=0.3,
                out_features=["stage1", "stage2", "stage3", "stage4"],
            )
        if isinstance(backbone_config, dict):
            backbone_model_type = backbone_config.pop("model_type")
            config_class = CONFIG_MAPPING[backbone_model_type]
            backbone_config = config_class.from_dict(backbone_config)
        # verify that the backbone is supported
        if backbone_config.model_type not in self.backbones_supported:
            logger.warning_once(
                f"Backbone {backbone_config.model_type} is not a supported model and may not be compatible with MaskFormer. "
                f"Supported model types: {','.join(self.backbones_supported)}"
            )

        if decoder_config is None:
            # fall back to https://huggingface.co/facebook/detr-resnet-50
            decoder_config = DetrConfig()
        else:
            # verify that the decoder is supported
            decoder_type = (
                decoder_config.pop("model_type") if isinstance(decoder_config, dict) else decoder_config.model_type
            )
            if decoder_type not in self.decoders_supported:
                raise ValueError(
                    f"Transformer Decoder {decoder_type} not supported, please use one of"
                    f" {','.join(self.decoders_supported)}"
                )
            if isinstance(decoder_config, dict):
                config_class = CONFIG_MAPPING[decoder_type]
                decoder_config = config_class.from_dict(decoder_config)

        self.backbone_config = backbone_config
        self.decoder_config = decoder_config
        # main feature dimension for the model
        self.fpn_feature_size = fpn_feature_size
        self.mask_feature_size = mask_feature_size
        # initializer
        self.init_std = init_std
        self.init_xavier_std = init_xavier_std
        # Hungarian matcher && loss
        self.cross_entropy_weight = cross_entropy_weight
        self.dice_weight = dice_weight
        self.mask_weight = mask_weight
        self.use_auxiliary_loss = use_auxiliary_loss
        self.no_object_weight = no_object_weight
        self.output_auxiliary_logits = output_auxiliary_logits
        self.num_attention_heads = self.decoder_config.encoder_attention_heads
        self.num_hidden_layers = self.decoder_config.num_hidden_layers
        super().__init__(**kwargs)
    @classmethod
    def from_backbone_and_decoder_configs(cls, backbone_config, decoder_config, **kwargs):
        return cls(
            backbone_config=backbone_config, decoder_config=decoder_config, **kwargs,
        )
    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        output["backbone_config"] = self.backbone_config.to_dict()
        output["decoder_config"] = self.decoder_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
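# Usage sketch: with no arguments the config builds its default Swin backbone and
# DETR decoder, exercising both fallbacks above.
#
#   config = MaskFormerConfig()
#   config.backbone_config.model_type   # "swin"
#   config.decoder_config.model_type    # "detr"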
| 704 |
'''simple docstring'''
from typing import List, Optional, Union
import numpy as np
import torch
import torchaudio.compliance.kaldi as ta_kaldi
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import PaddingStrategy, TensorType, logging
logger = logging.get_logger(__name__)


class Speech2TextFeatureExtractor(SequenceFeatureExtractor):
    model_input_names = ["input_features", "attention_mask"]
    def __init__(
        self, feature_size=80, sampling_rate=16000, num_mel_bins=80, padding_value=0.0,
        do_ceptral_normalize=True, normalize_means=True, normalize_vars=True, **kwargs,
    ):
        super().__init__(feature_size=feature_size, sampling_rate=sampling_rate, padding_value=padding_value, **kwargs)
        self.num_mel_bins = num_mel_bins
        self.do_ceptral_normalize = do_ceptral_normalize
        self.normalize_means = normalize_means
        self.normalize_vars = normalize_vars
        self.return_attention_mask = True
    def _extract_fbank_features(self, waveform: np.ndarray) -> np.ndarray:
        """Compute Kaldi-style log-mel filter bank features from a mono waveform."""
        waveform = waveform * (2**15)  # Kaldi compliance: 16-bit signed integers
        waveform = torch.from_numpy(waveform).unsqueeze(0)
        features = ta_kaldi.fbank(waveform, num_mel_bins=self.num_mel_bins, sample_frequency=self.sampling_rate)
        return features.numpy()
    @staticmethod
    def utterance_cmvn(
        x: np.ndarray, input_length: int, normalize_means: bool = True, normalize_vars: bool = True,
        padding_value: float = 0.0,
    ) -> np.ndarray:
        # subtract the mean and divide by the standard deviation of the valid frames only
        if normalize_means:
            mean = x[:input_length].mean(axis=0)
            x = np.subtract(x, mean)
        if normalize_vars:
            std = x[:input_length].std(axis=0)
            x = np.divide(x, std)

        if input_length < x.shape[0]:
            x[input_length:] = padding_value

        # make sure array is in float32
        x = x.astype(np.float32)
        return x
    def normalize(self, input_features, attention_mask=None):
        lengths = attention_mask.sum(-1) if attention_mask is not None else [x.shape[0] for x in input_features]
        return [
            self.utterance_cmvn(x, n, self.normalize_means, self.normalize_vars, self.padding_value)
            for x, n in zip(input_features, lengths)
        ]
    def __call__(
        self, raw_speech, padding=False, max_length=None, truncation=False, pad_to_multiple_of=None,
        return_tensors=None, sampling_rate=None, return_attention_mask=None, **kwargs,
    ) -> BatchFeature:
        if sampling_rate is not None:
            if sampling_rate != self.sampling_rate:
                raise ValueError(
                    f"The model corresponding to this feature extractor: {self} was trained using a sampling rate of"
                    f" {self.sampling_rate}. Please make sure that the provided `raw_speech` input was sampled with"
                    f" {self.sampling_rate} and not {sampling_rate}."
                )
        else:
            logger.warning(
                "It is strongly recommended to pass the `sampling_rate` argument to this function. "
                "Failing to do so can result in silent errors that might be hard to debug."
            )

        is_batched_numpy = isinstance(raw_speech, np.ndarray) and len(raw_speech.shape) > 1
        if is_batched_numpy and len(raw_speech.shape) > 2:
            raise ValueError(f"Only mono-channel audio is supported for input to {self}")
        is_batched = is_batched_numpy or (
            isinstance(raw_speech, (list, tuple)) and (isinstance(raw_speech[0], (np.ndarray, tuple, list)))
        )

        if is_batched:
            raw_speech = [np.asarray(speech, dtype=np.float32) for speech in raw_speech]
        elif not is_batched and not isinstance(raw_speech, np.ndarray):
            raw_speech = np.asarray(raw_speech, dtype=np.float32)
        elif isinstance(raw_speech, np.ndarray) and raw_speech.dtype is np.dtype(np.float64):
            raw_speech = raw_speech.astype(np.float32)

        # always return batch
        if not is_batched:
            raw_speech = [raw_speech]

        # extract fbank features
        features = [self._extract_fbank_features(waveform) for waveform in raw_speech]

        # convert into correct format for padding
        encoded_inputs = BatchFeature({"input_features": features})

        padded_inputs = self.pad(
            encoded_inputs, padding=padding, max_length=max_length, truncation=truncation,
            pad_to_multiple_of=pad_to_multiple_of, return_attention_mask=return_attention_mask, **kwargs,
        )

        # make sure list is in array format
        input_features = padded_inputs.get("input_features")
        if isinstance(input_features[0], list):
            padded_inputs["input_features"] = [np.asarray(feature, dtype=np.float32) for feature in input_features]

        attention_mask = padded_inputs.get("attention_mask")
        if attention_mask is not None:
            padded_inputs["attention_mask"] = [np.asarray(array, dtype=np.int32) for array in attention_mask]

        # Utterance-level cepstral mean and variance normalization
        if self.do_ceptral_normalize:
            attention_mask = (
                np.array(attention_mask, dtype=np.int32)
                if self._get_padding_strategies(padding, max_length=max_length) is not PaddingStrategy.DO_NOT_PAD
                else None
            )
            padded_inputs["input_features"] = self.normalize(
                padded_inputs["input_features"], attention_mask=attention_mask
            )

        if return_tensors is not None:
            padded_inputs = padded_inputs.convert_to_tensors(return_tensors)

        return padded_inputs
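# Usage sketch (synthetic audio; the Kaldi fbank call above requires torchaudio):
#
#   audio = np.random.randn(16000).astype(np.float32) * 0.1   # 1 s of fake 16 kHz audio
#   extractor = Speech2TextFeatureExtractor()                  # defaults defined above
#   out = extractor(audio, sampling_rate=16000, return_tensors="np")
#   out["input_features"].shape                                # (1, num_frames, 80)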
| 575 | 0 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
"""configuration_instructblip""": [
"""INSTRUCTBLIP_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""InstructBlipConfig""",
"""InstructBlipQFormerConfig""",
"""InstructBlipVisionConfig""",
],
"""processing_instructblip""": ["""InstructBlipProcessor"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_instructblip"] = [
"""INSTRUCTBLIP_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""InstructBlipQFormerModel""",
"""InstructBlipPreTrainedModel""",
"""InstructBlipForConditionalGeneration""",
"""InstructBlipVisionModel""",
]
if TYPE_CHECKING:
from .configuration_instructblip import (
INSTRUCTBLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
InstructBlipConfig,
InstructBlipQFormerConfig,
InstructBlipVisionConfig,
)
from .processing_instructblip import InstructBlipProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_instructblip import (
INSTRUCTBLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
InstructBlipForConditionalGeneration,
InstructBlipPreTrainedModel,
InstructBlipQFormerModel,
InstructBlipVisionModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 606 |
from __future__ import annotations
# This is the precision for this function which can be altered.
# It is recommended for users to keep this number greater than or equal to 10.
precision = 10
def lin_search(left: int, right: int, array: list, target: int) -> int:
    """Linear search over array[left:right]; returns -1 if target is not found."""
    for i in range(left, right):
        if array[i] == target:
            return i
    return -1


def ite_ternary_search(array: list, target: int) -> int:
    """Iterative ternary search; falls back to lin_search below `precision`."""
    left = 0
    right = len(array)
    while left <= right:
        if right - left < precision:
            return lin_search(left, right, array, target)

        one_third = (left + right) // 3 + 1
        two_third = 2 * (left + right) // 3 + 1

        if array[one_third] == target:
            return one_third
        elif array[two_third] == target:
            return two_third

        elif target < array[one_third]:
            right = one_third - 1
        elif array[two_third] < target:
            left = two_third + 1

        else:
            left = one_third + 1
            right = two_third - 1
    else:
        return -1


def rec_ternary_search(left: int, right: int, array: list, target: int) -> int:
    """Recursive ternary search; falls back to lin_search below `precision`."""
    if left < right:
        if right - left < precision:
            return lin_search(left, right, array, target)
        one_third = (left + right) // 3 + 1
        two_third = 2 * (left + right) // 3 + 1

        if array[one_third] == target:
            return one_third
        elif array[two_third] == target:
            return two_third

        elif target < array[one_third]:
            return rec_ternary_search(left, one_third - 1, array, target)
        elif array[two_third] < target:
            return rec_ternary_search(two_third + 1, right, array, target)
        else:
            return rec_ternary_search(one_third + 1, two_third - 1, array, target)
    else:
        return -1
if __name__ == "__main__":
import doctest
doctest.testmod()
    user_input = input("Enter numbers separated by comma:\n").strip()
    collection = [int(item.strip()) for item in user_input.split(",")]
    assert collection == sorted(collection), f"List must be ordered.\n{collection}."
    target = int(input("Enter the number to be found in the list:\n").strip())
    result_ite = ite_ternary_search(collection, target)
    result_rec = rec_ternary_search(0, len(collection) - 1, collection, target)
    if result_ite != -1:
        print(f"Iterative search: {target} found at position {result_ite}")
        print(f"Recursive search: {target} found at position {result_rec}")
    else:
        print("Not found")
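# Worked example (a sketch): with precision = 10 both variants fall back to the
# linear scan once the window is narrow, and they agree on the index:
#
#   data = [1, 3, 5, 7, 9, 11, 13, 15, 17, 19, 21, 23]
#   ite_ternary_search(data, 13)                    # 6
#   rec_ternary_search(0, len(data) - 1, data, 13)  # 6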
| 606 | 1 |
import math
from collections import defaultdict
from typing import List, Optional, Tuple, Union
import numpy as np
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from .scheduling_utils import KarrasDiffusionSchedulers, SchedulerMixin, SchedulerOutput
def betas_for_alpha_bar(num_diffusion_timesteps, max_beta=0.999, alpha_transform_type="cosine"):
    if alpha_transform_type == "cosine":

        def alpha_bar_fn(t):
            return math.cos((t + 0.008) / 1.008 * math.pi / 2) ** 2

    elif alpha_transform_type == "exp":

        def alpha_bar_fn(t):
            return math.exp(t * -12.0)

    else:
        raise ValueError(f"Unsupported alpha_transform_type: {alpha_transform_type}")

    betas = []
    for i in range(num_diffusion_timesteps):
        t1 = i / num_diffusion_timesteps
        t2 = (i + 1) / num_diffusion_timesteps
        betas.append(min(1 - alpha_bar_fn(t2) / alpha_bar_fn(t1), max_beta))
    return torch.tensor(betas, dtype=torch.float32)
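# Quick sanity check (a sketch, commented out to keep this module import-safe):
#
#   betas = betas_for_alpha_bar(1000)
#   betas.shape           # torch.Size([1000])
#   float(betas.max())    # <= 0.999, because each beta is clipped at max_beta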
class HeunDiscreteScheduler(SchedulerMixin, ConfigMixin):
    _compatibles = [e.name for e in KarrasDiffusionSchedulers]
    order = 2
@register_to_config
    def __init__(
        self, num_train_timesteps: int = 1000, beta_start: float = 0.00085, beta_end: float = 0.012,
        beta_schedule: str = "linear", trained_betas: Optional[Union[np.ndarray, List[float]]] = None,
        prediction_type: str = "epsilon", use_karras_sigmas: Optional[bool] = False,
        clip_sample: Optional[bool] = False, clip_sample_range: float = 1.0,
        timestep_spacing: str = "linspace", steps_offset: int = 0,
    ):
        if trained_betas is not None:
            self.betas = torch.tensor(trained_betas, dtype=torch.float32)
        elif beta_schedule == "linear":
            self.betas = torch.linspace(beta_start, beta_end, num_train_timesteps, dtype=torch.float32)
        elif beta_schedule == "scaled_linear":
            # this schedule is very specific to the latent diffusion model.
            self.betas = (
                torch.linspace(beta_start**0.5, beta_end**0.5, num_train_timesteps, dtype=torch.float32) ** 2
            )
        elif beta_schedule == "squaredcos_cap_v2":
            # Glide cosine schedule
            self.betas = betas_for_alpha_bar(num_train_timesteps, alpha_transform_type="cosine")
        elif beta_schedule == "exp":
            self.betas = betas_for_alpha_bar(num_train_timesteps, alpha_transform_type="exp")
        else:
            raise NotImplementedError(f"{beta_schedule} is not implemented for {self.__class__}")

        self.alphas = 1.0 - self.betas
        self.alphas_cumprod = torch.cumprod(self.alphas, dim=0)

        # set all values
        self.set_timesteps(num_train_timesteps, None, num_train_timesteps)
        self.use_karras_sigmas = use_karras_sigmas
    def index_for_timestep(self, timestep, schedule_timesteps=None):
        if schedule_timesteps is None:
            schedule_timesteps = self.timesteps

        indices = (schedule_timesteps == timestep).nonzero()

        # The sigma index that is taken for the **very** first `step`
        # is always the second index (or the last index if there is only 1)
        # This way we can ensure we don't accidentally skip a sigma in
        # case we start in the middle of the denoising schedule (e.g. for image-to-image)
        if len(self._index_counter) == 0:
            pos = 1 if len(indices) > 1 else 0
        else:
            timestep_int = timestep.cpu().item() if torch.is_tensor(timestep) else timestep
            pos = self._index_counter[timestep_int]

        return indices[pos].item()
    @property
    def init_noise_sigma(self):
        # standard deviation of the initial noise distribution
        if self.config.timestep_spacing in ["linspace", "trailing"]:
            return self.sigmas.max()

        return (self.sigmas.max() ** 2 + 1) ** 0.5
    def scale_model_input(self, sample: torch.FloatTensor, timestep: Union[float, torch.FloatTensor]) -> torch.FloatTensor:
        """Scale the denoising input so the model sees a variance-preserving sample."""
        step_index = self.index_for_timestep(timestep)

        sigma = self.sigmas[step_index]
        sample = sample / ((sigma**2 + 1) ** 0.5)
        return sample
    def set_timesteps(
        self, num_inference_steps: int, device: Union[str, torch.device] = None,
        num_train_timesteps: Optional[int] = None,
    ):
        self.num_inference_steps = num_inference_steps

        num_train_timesteps = num_train_timesteps or self.config.num_train_timesteps

        # "linspace", "leading", "trailing" corresponds to annotation of Table 2. of https://arxiv.org/abs/2305.08891
        if self.config.timestep_spacing == "linspace":
            timesteps = np.linspace(0, num_train_timesteps - 1, num_inference_steps, dtype=np.float32)[::-1].copy()
        elif self.config.timestep_spacing == "leading":
            step_ratio = num_train_timesteps // self.num_inference_steps
            # creates integer timesteps by multiplying by ratio
            # casting to int to avoid issues when num_inference_step is power of 3
            timesteps = (np.arange(0, num_inference_steps) * step_ratio).round()[::-1].copy().astype(np.float32)
            timesteps += self.config.steps_offset
        elif self.config.timestep_spacing == "trailing":
            step_ratio = num_train_timesteps / self.num_inference_steps
            # creates integer timesteps by multiplying by ratio
            # casting to int to avoid issues when num_inference_step is power of 3
            timesteps = (np.arange(num_train_timesteps, 0, -step_ratio)).round().copy().astype(np.float32)
            timesteps -= 1
        else:
            raise ValueError(
                f"{self.config.timestep_spacing} is not supported. Please make sure to choose one of 'linspace', 'leading' or 'trailing'."
            )

        sigmas = np.array(((1 - self.alphas_cumprod) / self.alphas_cumprod) ** 0.5)
        log_sigmas = np.log(sigmas)
        sigmas = np.interp(timesteps, np.arange(0, len(sigmas)), sigmas)

        if self.config.use_karras_sigmas:
            sigmas = self._convert_to_karras(in_sigmas=sigmas, num_inference_steps=self.num_inference_steps)
            timesteps = np.array([self._sigma_to_t(sigma, log_sigmas) for sigma in sigmas])

        sigmas = np.concatenate([sigmas, [0.0]]).astype(np.float32)
        sigmas = torch.from_numpy(sigmas).to(device=device)
        self.sigmas = torch.cat([sigmas[:1], sigmas[1:-1].repeat_interleave(2), sigmas[-1:]])

        timesteps = torch.from_numpy(timesteps)
        timesteps = torch.cat([timesteps[:1], timesteps[1:].repeat_interleave(2)])

        if str(device).startswith("mps"):
            # mps does not support float64
            self.timesteps = timesteps.to(device, dtype=torch.float32)
        else:
            self.timesteps = timesteps.to(device=device)

        # empty dt and derivative
        self.prev_derivative = None
        self.dt = None

        # for exp beta schedules, such as the one for `pipeline_shap_e.py`
        # we need an index counter
        self._index_counter = defaultdict(int)
    def _sigma_to_t(self, sigma, log_sigmas):
        # get log sigma
        log_sigma = np.log(sigma)

        # get distribution
        dists = log_sigma - log_sigmas[:, np.newaxis]

        # get sigmas range
        low_idx = np.cumsum((dists >= 0), axis=0).argmax(axis=0).clip(max=log_sigmas.shape[0] - 2)
        high_idx = low_idx + 1

        low = log_sigmas[low_idx]
        high = log_sigmas[high_idx]

        # interpolate sigmas
        w = (low - log_sigma) / (low - high)
        w = np.clip(w, 0, 1)

        # transform interpolation to time range
        t = (1 - w) * low_idx + w * high_idx
        t = t.reshape(sigma.shape)
        return t
    def _convert_to_karras(self, in_sigmas: torch.FloatTensor, num_inference_steps: int) -> torch.FloatTensor:
        """Construct the noise schedule of Karras et al. (2022)."""
        sigma_min = in_sigmas[-1].item()
        sigma_max = in_sigmas[0].item()

        rho = 7.0  # 7.0 is the value used in the paper
        ramp = np.linspace(0, 1, num_inference_steps)
        min_inv_rho = sigma_min ** (1 / rho)
        max_inv_rho = sigma_max ** (1 / rho)
        sigmas = (max_inv_rho + ramp * (min_inv_rho - max_inv_rho)) ** rho
        return sigmas
    @property
    def state_in_first_order(self):
        # True when no half-step is pending, i.e. the next step() call is first-order
        return self.dt is None
    def step(
        self, model_output: Union[torch.FloatTensor, np.ndarray], timestep: Union[float, torch.FloatTensor],
        sample: Union[torch.FloatTensor, np.ndarray], return_dict: bool = True,
    ):
        step_index = self.index_for_timestep(timestep)

        # advance index counter by 1
        timestep_int = timestep.cpu().item() if torch.is_tensor(timestep) else timestep
        self._index_counter[timestep_int] += 1

        if self.state_in_first_order:
            sigma = self.sigmas[step_index]
            sigma_next = self.sigmas[step_index + 1]
        else:
            # 2nd order / Heun's method
            sigma = self.sigmas[step_index - 1]
            sigma_next = self.sigmas[step_index]

        # currently only gamma=0 is supported. This usually works best anyways.
        # We can support gamma in the future but then need to scale the timestep before
        # passing it to the model which requires a change in API
        gamma = 0
        sigma_hat = sigma * (gamma + 1)  # Note: sigma_hat == sigma for now

        # 1. compute predicted original sample (x_0) from sigma-scaled predicted noise
        if self.config.prediction_type == "epsilon":
            sigma_input = sigma_hat if self.state_in_first_order else sigma_next
            pred_original_sample = sample - sigma_input * model_output
        elif self.config.prediction_type == "v_prediction":
            sigma_input = sigma_hat if self.state_in_first_order else sigma_next
            pred_original_sample = model_output * (-sigma_input / (sigma_input**2 + 1) ** 0.5) + (
                sample / (sigma_input**2 + 1)
            )
        elif self.config.prediction_type == "sample":
            pred_original_sample = model_output
        else:
            raise ValueError(
                f"prediction_type given as {self.config.prediction_type} must be one of `epsilon`, or `v_prediction`"
            )

        if self.config.clip_sample:
            pred_original_sample = pred_original_sample.clamp(
                -self.config.clip_sample_range, self.config.clip_sample_range
            )

        if self.state_in_first_order:
            # 2. Convert to an ODE derivative for 1st order
            derivative = (sample - pred_original_sample) / sigma_hat
            # 3. delta timestep
            dt = sigma_next - sigma_hat

            # store for 2nd order step
            self.prev_derivative = derivative
            self.dt = dt
            self.sample = sample
        else:
            # 2. 2nd order / Heun's method
            derivative = (sample - pred_original_sample) / sigma_next
            derivative = (self.prev_derivative + derivative) / 2

            # 3. take prev timestep & sample
            dt = self.dt
            sample = self.sample

            # free dt and derivative
            # Note, this puts the scheduler in "first order mode"
            self.prev_derivative = None
            self.dt = None
            self.sample = None

        prev_sample = sample + derivative * dt

        if not return_dict:
            return (prev_sample,)

        return SchedulerOutput(prev_sample=prev_sample)
    def add_noise(self, original_samples: torch.FloatTensor, noise: torch.FloatTensor, timesteps: torch.FloatTensor):
        # Make sure sigmas and timesteps have the same device and dtype as original_samples
        sigmas = self.sigmas.to(device=original_samples.device, dtype=original_samples.dtype)
        if original_samples.device.type == "mps" and torch.is_floating_point(timesteps):
            # mps does not support float64
            schedule_timesteps = self.timesteps.to(original_samples.device, dtype=torch.float32)
            timesteps = timesteps.to(original_samples.device, dtype=torch.float32)
        else:
            schedule_timesteps = self.timesteps.to(original_samples.device)
            timesteps = timesteps.to(original_samples.device)

        step_indices = [self.index_for_timestep(t, schedule_timesteps) for t in timesteps]

        sigma = sigmas[step_indices].flatten()
        while len(sigma.shape) < len(original_samples.shape):
            sigma = sigma.unsqueeze(-1)

        noisy_samples = original_samples + noise * sigma
        return noisy_samples
    def __len__(self):
        return self.config.num_train_timesteps
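# Usage sketch (the denoiser is a stand-in; a real pipeline would call a UNet):
#
#   sched = HeunDiscreteScheduler()
#   sched.set_timesteps(25)
#   sample = torch.randn(1, 3, 8, 8) * sched.init_noise_sigma
#   for t in sched.timesteps:
#       inp = sched.scale_model_input(sample, t)
#       model_output = torch.zeros_like(inp)                    # placeholder prediction
#       sample = sched.step(model_output, t, sample).prev_sample
#
# Because Heun is second order, most timesteps appear twice in sched.timesteps,
# once for each half-step; state_in_first_order tracks which half is pending.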
| 611 |
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class BridgeTowerProcessor(ProcessorMixin):
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "BridgeTowerImageProcessor"
    tokenizer_class = ("RobertaTokenizer", "RobertaTokenizerFast")

    def __init__(self, image_processor, tokenizer):
        super().__init__(image_processor, tokenizer)
    def __call__(
        self, images, text=None, add_special_tokens=True, padding=False, truncation=None, max_length=None,
        stride=0, pad_to_multiple_of=None, return_token_type_ids=None, return_attention_mask=None,
        return_overflowing_tokens=False, return_special_tokens_mask=False, return_offsets_mapping=False,
        return_length=False, verbose=True, return_tensors=None, **kwargs,
    ) -> BatchEncoding:
        encoding = self.tokenizer(
            text=text, add_special_tokens=add_special_tokens, padding=padding, truncation=truncation,
            max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of,
            return_token_type_ids=return_token_type_ids, return_attention_mask=return_attention_mask,
            return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask,
            return_offsets_mapping=return_offsets_mapping, return_length=return_length, verbose=verbose,
            return_tensors=return_tensors, **kwargs,
        )
        # add pixel_values + pixel_mask
        encoding_image_processor = self.image_processor(
            images, return_tensors=return_tensors, do_normalize=True, do_center_crop=True, **kwargs
        )
        encoding.update(encoding_image_processor)
        return encoding
    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))
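# A minimal usage sketch (assumption: `image` is a PIL.Image; the checkpoint name is
# illustrative of a BridgeTower checkpoint on the Hub):
#
#     processor = BridgeTowerProcessor.from_pretrained("BridgeTower/bridgetower-base")
#     encoding = processor(image, text="a photo of a cat", return_tensors="pt")
#     # encoding holds input_ids/attention_mask from the tokenizer plus
#     # pixel_values/pixel_mask from the image processor, merged into one BatchEncoding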
| 611 | 1 |
import dataclasses
import json
import sys
import types
from argparse import ArgumentDefaultsHelpFormatter, ArgumentParser, ArgumentTypeError
from copy import copy
from enum import Enum
from inspect import isclass
from pathlib import Path
from typing import Any, Callable, Dict, Iterable, List, Literal, NewType, Optional, Tuple, Union, get_type_hints
import yaml
DataClass = NewType("DataClass", Any)
DataClassType = NewType("DataClassType", Any)
def string_to_bool(v):
    if isinstance(v, bool):
        return v
if v.lower() in ("yes", "true", "t", "y", "1"):
return True
elif v.lower() in ("no", "false", "f", "n", "0"):
return False
else:
raise ArgumentTypeError(
F"Truthy value expected: got {v} but expected one of yes/no, true/false, t/f, y/n, 1/0 (case insensitive)." )
def make_choice_type_function(choices: list) -> Callable[[str], Any]:
    # Map each choice's string form back to the original value so argparse can match user input
    str_to_choice = {str(choice): choice for choice in choices}
    return lambda arg: str_to_choice.get(arg, arg)
def HfArg(
    *,
    aliases: Union[str, List[str]] = None,
    help: str = None,
    default: Any = dataclasses.MISSING,
    default_factory: Callable[[], Any] = dataclasses.MISSING,
    metadata: dict = None,
    **kwargs,
) -> dataclasses.Field:
    if metadata is None:
        # Important, don't use as default param in function signature because dict is mutable and shared across function calls
        metadata = {}
    if aliases is not None:
        metadata["aliases"] = aliases
    if help is not None:
        metadata["help"] = help

    return dataclasses.field(metadata=metadata, default=default, default_factory=default_factory, **kwargs)
class HfArgumentParser(ArgumentParser):
    dataclass_types: Iterable[DataClassType]

    def __init__(self, dataclass_types: Union[DataClassType, Iterable[DataClassType]], **kwargs):
        # To make the default appear when using --help
        if "formatter_class" not in kwargs:
            kwargs["formatter_class"] = ArgumentDefaultsHelpFormatter
        super().__init__(**kwargs)
        if dataclasses.is_dataclass(dataclass_types):
            dataclass_types = [dataclass_types]
        self.dataclass_types = list(dataclass_types)
        for dtype in self.dataclass_types:
            self._add_dataclass_arguments(dtype)
    @staticmethod
    def _parse_dataclass_field(parser: ArgumentParser, field: dataclasses.Field):
        field_name = f"--{field.name}"
        kwargs = field.metadata.copy()
        # field.metadata is not used at all by Data Classes,
        # it is provided as a third-party extension mechanism.
        if isinstance(field.type, str):
            raise RuntimeError(
                "Unresolved type detected, which should have been done with the help of "
                "`typing.get_type_hints` method by default"
            )

        aliases = kwargs.pop("aliases", [])
        if isinstance(aliases, str):
            aliases = [aliases]

        origin_type = getattr(field.type, "__origin__", field.type)
        if origin_type is Union or (hasattr(types, "UnionType") and isinstance(field.type, types.UnionType)):
            if str not in field.type.__args__ and (
                len(field.type.__args__) != 2 or type(None) not in field.type.__args__
            ):
                raise ValueError(
                    "Only `Union[X, NoneType]` (i.e., `Optional[X]`) is allowed for `Union` because"
                    " the argument parser only supports one type per argument."
                    f" Problem encountered in field '{field.name}'."
                )
            if type(None) not in field.type.__args__:
                # filter `str` in Union
                field.type = field.type.__args__[0] if field.type.__args__[1] == str else field.type.__args__[1]
                origin_type = getattr(field.type, "__origin__", field.type)
            elif bool not in field.type.__args__:
                # filter `NoneType` in Union (except for `Union[bool, NoneType]`)
                field.type = (
                    field.type.__args__[0] if isinstance(None, field.type.__args__[1]) else field.type.__args__[1]
                )
                origin_type = getattr(field.type, "__origin__", field.type)

        # A variable to store kwargs for a boolean field, if needed
        # so that we can init a `no_*` complement argument (see below)
        bool_kwargs = {}
        if origin_type is Literal or (isinstance(field.type, type) and issubclass(field.type, Enum)):
            if origin_type is Literal:
                kwargs["choices"] = field.type.__args__
            else:
                kwargs["choices"] = [x.value for x in field.type]

            kwargs["type"] = make_choice_type_function(kwargs["choices"])

            if field.default is not dataclasses.MISSING:
                kwargs["default"] = field.default
            else:
                kwargs["required"] = True
        elif field.type is bool or field.type == Optional[bool]:
            # Copy the correct kwargs to use to instantiate a `no_*` complement argument below.
            # We do not initialize it here because the `no_*` alternative must be instantiated after the real argument
            bool_kwargs = copy(kwargs)

            # Hack because type=bool in argparse does not behave as we want.
            kwargs["type"] = string_to_bool
            if field.type is bool or (field.default is not None and field.default is not dataclasses.MISSING):
                # Default value is False if the field is of type bool and has no default.
                default = False if field.default is dataclasses.MISSING else field.default
                # This is the value that will get picked if we don't include --field_name in any way
                kwargs["default"] = default
                # This tells argparse we accept 0 or 1 value after --field_name
                kwargs["nargs"] = "?"
                # This is the value that will get picked if we do --field_name (without value)
                kwargs["const"] = True
        elif isclass(origin_type) and issubclass(origin_type, list):
            kwargs["type"] = field.type.__args__[0]
            kwargs["nargs"] = "+"
            if field.default_factory is not dataclasses.MISSING:
                kwargs["default"] = field.default_factory()
            elif field.default is dataclasses.MISSING:
                kwargs["required"] = True
        else:
            kwargs["type"] = field.type
            if field.default is not dataclasses.MISSING:
                kwargs["default"] = field.default
            elif field.default_factory is not dataclasses.MISSING:
                kwargs["default"] = field.default_factory()
            else:
                kwargs["required"] = True
        parser.add_argument(field_name, *aliases, **kwargs)

        # Add a complement `no_*` argument for a boolean field AFTER the initial field has already been added.
        # Order is important for arguments with the same destination!
        # We use a copy of earlier kwargs because the original kwargs have changed a lot before reaching down
        # here and we do not need those changes/additional keys.
        if field.default is True and (field.type is bool or field.type == Optional[bool]):
            bool_kwargs["default"] = False
            parser.add_argument(f"--no_{field.name}", action="store_false", dest=field.name, **bool_kwargs)
    def _add_dataclass_arguments(self, dtype: DataClassType):
        if hasattr(dtype, "_argument_group_name"):
            parser = self.add_argument_group(dtype._argument_group_name)
        else:
            parser = self

        try:
            type_hints: Dict[str, type] = get_type_hints(dtype)
        except NameError:
            raise RuntimeError(
                f"Type resolution failed for {dtype}. Try declaring the class in global scope or "
                "removing line of `from __future__ import annotations` which opts in Postponed "
                "Evaluation of Annotations (PEP 563)"
            )
        except TypeError as ex:
            # Remove this block when we drop Python 3.9 support
            if sys.version_info[:2] < (3, 10) and "unsupported operand type(s) for |" in str(ex):
                python_version = ".".join(map(str, sys.version_info[:3]))
                raise RuntimeError(
                    f"Type resolution failed for {dtype} on Python {python_version}. Try removing "
                    "line of `from __future__ import annotations` which opts in union types as "
                    "`X | Y` (PEP 604) via Postponed Evaluation of Annotations (PEP 563). To "
                    "support Python versions lower than 3.10, you need to use "
                    "`typing.Union[X, Y]` instead of `X | Y` and `typing.Optional[X]` instead of "
                    "`X | None`."
                ) from ex
            raise

        for field in dataclasses.fields(dtype):
            if not field.init:
                continue
            field.type = type_hints[field.name]
            self._parse_dataclass_field(parser, field)
    def parse_args_into_dataclasses(
        self,
        args=None,
        return_remaining_strings=False,
        look_for_args_file=True,
        args_filename=None,
        args_file_flag=None,
    ) -> Tuple[DataClass, ...]:
        if args_file_flag or args_filename or (look_for_args_file and len(sys.argv)):
            args_files = []

            if args_filename:
                args_files.append(Path(args_filename))
            elif look_for_args_file and len(sys.argv):
                args_files.append(Path(sys.argv[0]).with_suffix(".args"))

            # args files specified via command line flag should overwrite default args files so we add them last
            if args_file_flag:
                # Create special parser just to extract the args_file_flag values
                args_file_parser = ArgumentParser()
                args_file_parser.add_argument(args_file_flag, type=str, action="append")

                # Use only remaining args for further parsing (remove the args_file_flag)
                cfg, args = args_file_parser.parse_known_args(args=args)
                cmd_args_file_paths = vars(cfg).get(args_file_flag.lstrip("-"), None)

                if cmd_args_file_paths:
                    args_files.extend([Path(p) for p in cmd_args_file_paths])

            file_args = []
            for args_file in args_files:
                if args_file.exists():
                    file_args += args_file.read_text().split()

            # in case of duplicate arguments the last one has precedence
            # args specified via the command line should overwrite args from files, so we add them last
            args = file_args + args if args is not None else file_args + sys.argv[1:]

        namespace, remaining_args = self.parse_known_args(args=args)
        outputs = []
        for dtype in self.dataclass_types:
            keys = {f.name for f in dataclasses.fields(dtype) if f.init}
            inputs = {k: v for k, v in vars(namespace).items() if k in keys}
            for k in keys:
                delattr(namespace, k)
            obj = dtype(**inputs)
            outputs.append(obj)
        if len(namespace.__dict__) > 0:
            # additional namespace.
            outputs.append(namespace)
        if return_remaining_strings:
            return (*outputs, remaining_args)
        else:
            if remaining_args:
                raise ValueError(f"Some specified arguments are not used by the HfArgumentParser: {remaining_args}")

            return (*outputs,)
    def parse_dict(self, args: Dict[str, Any], allow_extra_keys: bool = False) -> Tuple[DataClass, ...]:
        unused_keys = set(args.keys())
        outputs = []
        for dtype in self.dataclass_types:
            keys = {f.name for f in dataclasses.fields(dtype) if f.init}
            inputs = {k: v for k, v in args.items() if k in keys}
            unused_keys.difference_update(inputs.keys())
            obj = dtype(**inputs)
            outputs.append(obj)
        if not allow_extra_keys and unused_keys:
            raise ValueError(f"Some keys are not used by the HfArgumentParser: {sorted(unused_keys)}")
        return tuple(outputs)
    def parse_json_file(self, json_file: str, allow_extra_keys: bool = False) -> Tuple[DataClass, ...]:
        with open(Path(json_file), encoding="utf-8") as open_json_file:
            data = json.loads(open_json_file.read())
        outputs = self.parse_dict(data, allow_extra_keys=allow_extra_keys)
        return tuple(outputs)

    def parse_yaml_file(self, yaml_file: str, allow_extra_keys: bool = False) -> Tuple[DataClass, ...]:
        outputs = self.parse_dict(yaml.safe_load(Path(yaml_file).read_text()), allow_extra_keys=allow_extra_keys)
        return tuple(outputs)
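# A minimal usage sketch (the dataclass and CLI values are illustrative):
#
#     @dataclasses.dataclass
#     class TrainingArgs:
#         learning_rate: float = 1e-4
#         fp16: bool = False
#
#     parser = HfArgumentParser(TrainingArgs)
#     (training_args,) = parser.parse_args_into_dataclasses(["--learning_rate", "3e-5", "--fp16"])
#     # a --no_fp16 complement flag is only generated when a bool field defaults to True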
| 429 |
from collections import deque
from .hash_table import HashTable
class HashTableWithLinkedList(HashTable):
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)

    def _set_value(self, key, data):
        # each slot holds a deque so colliding values chain onto the same key
        self.values[key] = deque([]) if self.values[key] is None else self.values[key]
        self.values[key].appendleft(data)
        self._keys[key] = self.values[key]

    def balanced_factor(self):
        return (
            sum(self.charge_factor - len(slot) for slot in self.values)
            / self.size_table
            * self.charge_factor
        )

    def _collision_resolution(self, key, data=None):
        if not (
            len(self.values[key]) == self.charge_factor and self.values.count(None) == 0
        ):
            return key
        return super()._collision_resolution(key, data)
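# A minimal usage sketch (assumption: the parent HashTable follows TheAlgorithms'
# interface, exposing size_table/charge_factor and an insert_data method):
#
#     table = HashTableWithLinkedList(size_table=4, charge_factor=3)
#     for value in (10, 20, 30):
#         table.insert_data(value)
#     # colliding keys chain their values into the slot's deque instead of rehashing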
| 429 | 1 |
from collections import defaultdict
from graphs.minimum_spanning_tree_prims import prisms_algorithm as mst
def test_prim_successful_result() -> None:
    num_nodes, num_edges = 9, 14  # noqa: F841
    edges = [
[0, 1, 4],
[0, 7, 8],
[1, 2, 8],
[7, 8, 7],
[7, 6, 1],
[2, 8, 2],
[8, 6, 6],
[2, 3, 7],
[2, 5, 4],
[6, 5, 2],
[3, 5, 14],
[3, 4, 9],
[5, 4, 10],
[1, 7, 11],
]
    adjacency = defaultdict(list)
    for node1, node2, cost in edges:
        adjacency[node1].append([node2, cost])
        adjacency[node2].append([node1, cost])

    result = mst(adjacency)

    expected = [
[7, 6, 1],
[2, 8, 2],
[6, 5, 2],
[0, 1, 4],
[2, 5, 4],
[2, 3, 7],
[0, 7, 8],
[3, 4, 9],
]
for answer in expected:
        edge = tuple(answer[:2])
        reverse = tuple(edge[::-1])
assert edge in result or reverse in result
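    # A quick extra sanity check one could add: the expected MST has a total weight
    # of 37, which is simply the sum of the edge costs listed above.
    #     assert sum(cost for *_, cost in expected) == 37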
| 715 |
"""simple docstring"""
import os
import unittest
from transformers import LayoutLMTokenizer, LayoutLMTokenizerFast
from transformers.models.layoutlm.tokenization_layoutlm import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class LayoutLMTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = LayoutLMTokenizer
    rust_tokenizer_class = LayoutLMTokenizerFast
    test_rust_tokenizer = True
    space_between_special_tokens = True
    def setUp(self):
        super().setUp()

        vocab_tokens = [
"[UNK]",
"[CLS]",
"[SEP]",
"want",
"##want",
"##ed",
"wa",
"un",
"runn",
"##ing",
",",
"low",
"lowest",
]
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
with open(self.vocab_file , "w" , encoding="utf-8" ) as vocab_writer:
vocab_writer.write("".join([x + "\n" for x in vocab_tokens] ) )
    def get_tokenizer(self, **kwargs):
        return LayoutLMTokenizer.from_pretrained(self.tmpdirname, **kwargs)
    def get_input_output_texts(self, tokenizer):
        input_text = "UNwant\u00E9d,running"
        output_text = "unwanted, running"
        return input_text, output_text
    def test_full_tokenizer(self):
        tokenizer = self.tokenizer_class(self.vocab_file)

        tokens = tokenizer.tokenize("UNwant\u00E9d,running")
        self.assertListEqual(tokens, ["un", "##want", "##ed", ",", "runn", "##ing"])
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [7, 4, 5, 10, 8, 9])
def lowerCamelCase ( self ) -> Dict:
'''simple docstring'''
pass
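# A minimal illustration of the WordPiece behaviour the test above relies on
# (token ids follow the toy vocabulary defined in setUp):
#
#     tokenizer = LayoutLMTokenizer(vocab_file)
#     tokenizer.tokenize("unwanted running")
#     # -> ["un", "##want", "##ed", "runn", "##ing"]; the "##" prefix marks a
#     #    continuation piece that attaches to the preceding token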
| 117 | 0 |
"""simple docstring"""
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..bit import BitConfig
logger = logging.get_logger(__name__)

DPT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'''Intel/dpt-large''': '''https://huggingface.co/Intel/dpt-large/resolve/main/config.json''',
# See all DPT models at https://huggingface.co/models?filter=dpt
}
class DPTConfig(PretrainedConfig):
    model_type = "dpt"

    def __init__(
        self,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        image_size=384,
        patch_size=16,
        num_channels=3,
        is_hybrid=False,
        qkv_bias=True,
        backbone_out_indices=[2, 5, 8, 11],
        readout_type="project",
        reassemble_factors=[4, 2, 1, 0.5],
        neck_hidden_sizes=[96, 192, 384, 768],
        fusion_hidden_size=256,
        head_in_index=-1,
        use_batch_norm_in_fusion_residual=False,
        use_auxiliary_head=True,
        auxiliary_loss_weight=0.4,
        semantic_loss_ignore_index=255,
        semantic_classifier_dropout=0.1,
        backbone_featmap_shape=[1, 1024, 24, 24],
        neck_ignore_stages=[0, 1],
        backbone_config=None,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.hidden_size = hidden_size
        self.is_hybrid = is_hybrid

        if self.is_hybrid:
            if backbone_config is None:
                logger.info("Initializing the config with a `BiT` backbone.")
                backbone_config = {
                    "global_padding": "same",
                    "layer_type": "bottleneck",
                    "depths": [3, 4, 9],
                    "out_features": ["stage1", "stage2", "stage3"],
                    "embedding_dynamic_padding": True,
                }
                self.backbone_config = BitConfig(**backbone_config)
            elif isinstance(backbone_config, dict):
                logger.info("Initializing the config with a `BiT` backbone.")
                self.backbone_config = BitConfig(**backbone_config)
            elif isinstance(backbone_config, PretrainedConfig):
                self.backbone_config = backbone_config
            else:
                raise ValueError(
                    f"backbone_config must be a dictionary or a `PretrainedConfig`, got {backbone_config.__class__}."
                )

            self.backbone_featmap_shape = backbone_featmap_shape
            self.neck_ignore_stages = neck_ignore_stages

            if readout_type != "project":
                raise ValueError("Readout type must be 'project' when using `DPT-hybrid` mode.")
        else:
            self.backbone_config = None
            self.backbone_featmap_shape = None
            self.neck_ignore_stages = []

        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias
        self.backbone_out_indices = backbone_out_indices
        if readout_type not in ["ignore", "add", "project"]:
            raise ValueError("Readout_type must be one of ['ignore', 'add', 'project']")
        self.readout_type = readout_type
        self.reassemble_factors = reassemble_factors
        self.neck_hidden_sizes = neck_hidden_sizes
        self.fusion_hidden_size = fusion_hidden_size
        self.head_in_index = head_in_index
        self.use_batch_norm_in_fusion_residual = use_batch_norm_in_fusion_residual
        # auxiliary head attributes (semantic segmentation)
        self.use_auxiliary_head = use_auxiliary_head
        self.auxiliary_loss_weight = auxiliary_loss_weight
        self.semantic_loss_ignore_index = semantic_loss_ignore_index
        self.semantic_classifier_dropout = semantic_classifier_dropout

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        if output["backbone_config"] is not None:
            output["backbone_config"] = self.backbone_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
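# A minimal usage sketch (values fall back to the defaults above):
#
#     config = DPTConfig(is_hybrid=True)   # auto-creates the BiT backbone config
#     config_dict = config.to_dict()       # backbone_config is serialized recursively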
| 621 |
'''simple docstring'''
import argparse
import math
import traceback
import dateutil.parser as date_parser
import requests
def extract_time_from_single_job(job):
    job_info = {}

    start = job["started_at"]
    end = job["completed_at"]

    start_datetime = date_parser.parse(start)
    end_datetime = date_parser.parse(end)

    duration_in_min = round((end_datetime - start_datetime).total_seconds() / 60.0)
    job_info["started_at"] = start
    job_info["completed_at"] = end
    job_info["duration"] = duration_in_min

    return job_info


def get_job_time(workflow_run_id, token=None):
    headers = None
    if token is not None:
        headers = {"Accept": "application/vnd.github+json", "Authorization": f"Bearer {token}"}

    url = f"https://api.github.com/repos/huggingface/transformers/actions/runs/{workflow_run_id}/jobs?per_page=100"
    result = requests.get(url, headers=headers).json()
    job_time = {}

    try:
        job_time.update({job["name"]: extract_time_from_single_job(job) for job in result["jobs"]})
        pages_to_iterate_over = math.ceil((result["total_count"] - 100) / 100)

        for i in range(pages_to_iterate_over):
            result = requests.get(url + f"&page={i + 2}", headers=headers).json()
            job_time.update({job["name"]: extract_time_from_single_job(job) for job in result["jobs"]})

        return job_time
    except Exception:
        print(f"Unknown error, could not fetch links:\n{traceback.format_exc()}")

    return {}
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument('''--workflow_run_id''', type=str, required=True, help='''A GitHub Actions workflow run id.''')
    args = parser.parse_args()

    job_time = get_job_time(args.workflow_run_id)
    job_time = dict(sorted(job_time.items(), key=lambda item: item[1]["duration"], reverse=True))
for k, v in job_time.items():
print(F"{k}: {v['duration']}")
| 414 | 0 |
'''simple docstring'''
import inspect
import unittest
from transformers import YolosConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import YolosForObjectDetection, YolosModel
from transformers.models.yolos.modeling_yolos import YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class YolosModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=[30, 30],
        patch_size=2,
        num_channels=3,
        is_training=True,
        use_labels=True,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        type_sequence_label_size=10,
        initializer_range=0.02,
        num_labels=3,
        scope=None,
        n_targets=8,
        num_detection_tokens=10,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.scope = scope
        self.n_targets = n_targets
        self.num_detection_tokens = num_detection_tokens
        # we set the expected sequence length (which is used in several tests)
        # expected sequence length = num_patches + 1 (we add 1 for the [CLS] token) + num_detection_tokens
        num_patches = (image_size[1] // patch_size) * (image_size[0] // patch_size)
        self.expected_seq_len = num_patches + 1 + self.num_detection_tokens

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size[0], self.image_size[1]])

        labels = None
        if self.use_labels:
            # labels is a list of Dict (each Dict being the labels for a given example in the batch)
            labels = []
            for i in range(self.batch_size):
                target = {}
                target["class_labels"] = torch.randint(
                    high=self.num_labels, size=(self.n_targets,), device=torch_device
                )
                target["boxes"] = torch.rand(self.n_targets, 4, device=torch_device)
                labels.append(target)

        config = self.get_config()

        return config, pixel_values, labels

    def get_config(self):
        return YolosConfig(
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            is_decoder=False,
            initializer_range=self.initializer_range,
            num_detection_tokens=self.num_detection_tokens,
            num_labels=self.num_labels,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = YolosModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.last_hidden_state.shape, (self.batch_size, self.expected_seq_len, self.hidden_size)
        )

    def create_and_check_for_object_detection(self, config, pixel_values, labels):
        model = YolosForObjectDetection(config)
        model.to(torch_device)
        model.eval()

        result = model(pixel_values=pixel_values)
        result = model(pixel_values)

        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_detection_tokens, self.num_labels + 1))
        self.parent.assertEqual(result.pred_boxes.shape, (self.batch_size, self.num_detection_tokens, 4))

        result = model(pixel_values=pixel_values, labels=labels)

        self.parent.assertEqual(result.loss.shape, ())
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_detection_tokens, self.num_labels + 1))
        self.parent.assertEqual(result.pred_boxes.shape, (self.batch_size, self.num_detection_tokens, 4))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class YolosModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (YolosModel, YolosForObjectDetection) if is_torch_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": YolosModel, "object-detection": YolosForObjectDetection} if is_torch_available() else {}
    )

    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    test_torchscript = False
def lowercase_ ( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_=False) -> Any:
"""simple docstring"""
a_ =super()._prepare_for_class(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , return_labels=_SCREAMING_SNAKE_CASE)
if return_labels:
if model_class.__name__ == "YolosForObjectDetection":
a_ =[]
for i in range(self.model_tester.batch_size):
a_ ={}
a_ =torch.ones(
size=(self.model_tester.n_targets,) , device=_SCREAMING_SNAKE_CASE , dtype=torch.long)
a_ =torch.ones(
self.model_tester.n_targets , 4 , device=_SCREAMING_SNAKE_CASE , dtype=torch.float)
labels.append(_SCREAMING_SNAKE_CASE)
a_ =labels
return inputs_dict
def lowercase_ ( self) -> List[str]:
"""simple docstring"""
a_ =YolosModelTester(self)
a_ =ConfigTester(self , config_class=_SCREAMING_SNAKE_CASE , has_text_modality=_SCREAMING_SNAKE_CASE , hidden_size=3_7)
def lowercase_ ( self) -> int:
"""simple docstring"""
self.config_tester.run_common_tests()
def lowercase_ ( self) -> Any:
"""simple docstring"""
pass
def lowercase_ ( self) -> Optional[Any]:
"""simple docstring"""
a_ , a_ =self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
a_ =model_class(_SCREAMING_SNAKE_CASE)
self.assertIsInstance(model.get_input_embeddings() , (nn.Module))
a_ =model.get_output_embeddings()
self.assertTrue(x is None or isinstance(_SCREAMING_SNAKE_CASE , nn.Linear))
def lowercase_ ( self) -> Union[str, Any]:
"""simple docstring"""
a_ , a_ =self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
a_ =model_class(_SCREAMING_SNAKE_CASE)
a_ =inspect.signature(model.forward)
# signature.parameters is an OrderedDict => so arg_names order is deterministic
a_ =[*signature.parameters.keys()]
a_ =["pixel_values"]
self.assertListEqual(arg_names[:1] , _SCREAMING_SNAKE_CASE)
def lowercase_ ( self) -> List[str]:
"""simple docstring"""
a_ =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_SCREAMING_SNAKE_CASE)
def lowercase_ ( self) -> Optional[int]:
"""simple docstring"""
a_ , a_ =self.model_tester.prepare_config_and_inputs_for_common()
a_ =True
# in YOLOS, the seq_len is different
a_ =self.model_tester.expected_seq_len
for model_class in self.all_model_classes:
a_ =True
a_ =False
a_ =True
a_ =model_class(_SCREAMING_SNAKE_CASE)
model.to(_SCREAMING_SNAKE_CASE)
model.eval()
with torch.no_grad():
a_ =model(**self._prepare_for_class(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE))
a_ =outputs.attentions
self.assertEqual(len(_SCREAMING_SNAKE_CASE) , self.model_tester.num_hidden_layers)
# check that output_attentions also work using config
del inputs_dict["output_attentions"]
a_ =True
a_ =model_class(_SCREAMING_SNAKE_CASE)
model.to(_SCREAMING_SNAKE_CASE)
model.eval()
with torch.no_grad():
a_ =model(**self._prepare_for_class(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE))
a_ =outputs.attentions
self.assertEqual(len(_SCREAMING_SNAKE_CASE) , self.model_tester.num_hidden_layers)
self.assertListEqual(
list(attentions[0].shape[-3:]) , [self.model_tester.num_attention_heads, seq_len, seq_len] , )
a_ =len(_SCREAMING_SNAKE_CASE)
# Check attention is always last and order is fine
a_ =True
a_ =True
a_ =model_class(_SCREAMING_SNAKE_CASE)
model.to(_SCREAMING_SNAKE_CASE)
model.eval()
with torch.no_grad():
a_ =model(**self._prepare_for_class(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE))
a_ =1
self.assertEqual(out_len + added_hidden_states , len(_SCREAMING_SNAKE_CASE))
a_ =outputs.attentions
self.assertEqual(len(_SCREAMING_SNAKE_CASE) , self.model_tester.num_hidden_layers)
self.assertListEqual(
list(self_attentions[0].shape[-3:]) , [self.model_tester.num_attention_heads, seq_len, seq_len] , )
def lowercase_ ( self) -> str:
"""simple docstring"""
def check_hidden_states_output(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_):
a_ =model_class(_SCREAMING_SNAKE_CASE)
model.to(_SCREAMING_SNAKE_CASE)
model.eval()
with torch.no_grad():
a_ =model(**self._prepare_for_class(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE))
a_ =outputs.hidden_states
a_ =getattr(
self.model_tester , "expected_num_hidden_layers" , self.model_tester.num_hidden_layers + 1)
self.assertEqual(len(_SCREAMING_SNAKE_CASE) , _SCREAMING_SNAKE_CASE)
# YOLOS has a different seq_length
a_ =self.model_tester.expected_seq_len
self.assertListEqual(
list(hidden_states[0].shape[-2:]) , [seq_length, self.model_tester.hidden_size] , )
a_ , a_ =self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
a_ =True
check_hidden_states_output(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE)
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
a_ =True
check_hidden_states_output(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE)
def lowercase_ ( self) -> str:
"""simple docstring"""
a_ =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_object_detection(*_SCREAMING_SNAKE_CASE)
@slow
def lowercase_ ( self) -> List[str]:
"""simple docstring"""
for model_name in YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
a_ =YolosModel.from_pretrained(_SCREAMING_SNAKE_CASE)
self.assertIsNotNone(_SCREAMING_SNAKE_CASE)
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_torch
@require_vision
class YolosModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return AutoImageProcessor.from_pretrained("hustvl/yolos-small") if is_vision_available() else None
    @slow
    def test_inference_object_detection_head(self):
        model = YolosForObjectDetection.from_pretrained("hustvl/yolos-small").to(torch_device)

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(inputs.pixel_values)

        # verify outputs
        expected_shape = torch.Size((1, 100, 92))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice_logits = torch.tensor(
            [[-24.0248, -10.3024, -14.8290], [-42.0392, -16.8200, -27.4334], [-27.2743, -11.8154, -18.7148]],
            device=torch_device,
        )
        expected_slice_boxes = torch.tensor(
            [[0.2559, 0.5455, 0.4706], [0.2989, 0.7279, 0.1875], [0.7732, 0.4017, 0.4462]], device=torch_device
        )
        self.assertTrue(torch.allclose(outputs.logits[0, :3, :3], expected_slice_logits, atol=1e-4))
        self.assertTrue(torch.allclose(outputs.pred_boxes[0, :3, :3], expected_slice_boxes, atol=1e-4))

        # verify postprocessing
        results = image_processor.post_process_object_detection(
            outputs, threshold=0.3, target_sizes=[image.size[::-1]]
        )[0]
        expected_scores = torch.tensor([0.9994, 0.9790, 0.9964, 0.9972, 0.9861]).to(torch_device)
        expected_labels = [75, 75, 17, 63, 17]
        expected_slice_boxes = torch.tensor([335.0609, 79.3848, 375.4216, 187.2495]).to(torch_device)

        self.assertEqual(len(results["scores"]), 5)
        self.assertTrue(torch.allclose(results["scores"], expected_scores, atol=1e-4))
        self.assertSequenceEqual(results["labels"].tolist(), expected_labels)
        self.assertTrue(torch.allclose(results["boxes"][0, :], expected_slice_boxes))
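# A minimal end-to-end sketch mirroring the integration test above (same checkpoint;
# `image` is any PIL image):
#
#     processor = AutoImageProcessor.from_pretrained("hustvl/yolos-small")
#     model = YolosForObjectDetection.from_pretrained("hustvl/yolos-small")
#     inputs = processor(images=image, return_tensors="pt")
#     with torch.no_grad():
#         outputs = model(**inputs)
#     detections = processor.post_process_object_detection(
#         outputs, threshold=0.3, target_sizes=[image.size[::-1]]
#     )[0]  # dict with "scores", "labels" and "boxes"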
| 708 |
'''simple docstring'''
from collections.abc import Generator
def fibonacci_generator() -> Generator[int, None, None]:
    a, b = 0, 1
    while True:
        a, b = b, a + b
yield b
def solution(n: int = 1_000) -> int:
    """Returns the index of the first Fibonacci number to contain n digits."""
    answer = 1
    gen = fibonacci_generator()
    while len(str(next(gen))) < n:
answer += 1
return answer + 1
if __name__ == "__main__":
print(solution(int(str(input()).strip())))
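# Worked example: the first Fibonacci number with 3 digits is F(12) = 144,
# so solution(3) returns 12.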
| 41 | 0 |
import torch
from diffusers import KDPM2DiscreteScheduler
from diffusers.utils import torch_device
from .test_schedulers import SchedulerCommonTest
class KDPM2DiscreteSchedulerTest(SchedulerCommonTest):
    scheduler_classes = (KDPM2DiscreteScheduler,)
    num_inference_steps = 10

    def get_scheduler_config(self, **kwargs):
        config = {
            "num_train_timesteps": 1100,
            "beta_start": 0.0001,
            "beta_end": 0.02,
            "beta_schedule": "linear",
        }

        config.update(**kwargs)
        return config
    def test_timesteps(self):
        for timesteps in [10, 50, 100, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps)

    def test_betas(self):
        for beta_start, beta_end in zip([0.00001, 0.0001, 0.001], [0.0002, 0.002, 0.02]):
            self.check_over_configs(beta_start=beta_start, beta_end=beta_end)

    def test_schedules(self):
        for schedule in ["linear", "scaled_linear"]:
            self.check_over_configs(beta_schedule=schedule)

    def test_prediction_type(self):
        for prediction_type in ["epsilon", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type)
    def test_full_loop_with_v_prediction(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(prediction_type="v_prediction")
        scheduler = scheduler_class(**scheduler_config)

        scheduler.set_timesteps(self.num_inference_steps)

        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma
        sample = sample.to(torch_device)

        for i, t in enumerate(scheduler.timesteps):
            sample = scheduler.scale_model_input(sample, t)

            model_output = model(sample, t)

            output = scheduler.step(model_output, t, sample)
            sample = output.prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        if torch_device in ["cpu", "mps"]:
            assert abs(result_sum.item() - 4.6934e-07) < 1e-2
            assert abs(result_mean.item() - 6.1112e-10) < 1e-3
        else:
            # CUDA
            assert abs(result_sum.item() - 4.693428650170972e-07) < 1e-2
            assert abs(result_mean.item() - 0.0002) < 1e-3
    def test_full_loop_no_noise(self):
        if torch_device == "mps":
            return
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        scheduler.set_timesteps(self.num_inference_steps)

        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma
        sample = sample.to(torch_device)

        for i, t in enumerate(scheduler.timesteps):
            sample = scheduler.scale_model_input(sample, t)

            model_output = model(sample, t)

            output = scheduler.step(model_output, t, sample)
            sample = output.prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        if torch_device in ["cpu", "mps"]:
            assert abs(result_sum.item() - 20.4125) < 1e-2
            assert abs(result_mean.item() - 0.0266) < 1e-3
        else:
            # CUDA
            assert abs(result_sum.item() - 20.4125) < 1e-2
            assert abs(result_mean.item() - 0.0266) < 1e-3
    def test_full_loop_device(self):
        if torch_device == "mps":
            return
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        scheduler.set_timesteps(self.num_inference_steps, device=torch_device)

        model = self.dummy_model()
        sample = self.dummy_sample_deter.to(torch_device) * scheduler.init_noise_sigma

        for t in scheduler.timesteps:
            sample = scheduler.scale_model_input(sample, t)

            model_output = model(sample, t)

            output = scheduler.step(model_output, t, sample)
            sample = output.prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        if str(torch_device).startswith("cpu"):
            # The following sum varies between 148 and 156 on mps. Why?
            assert abs(result_sum.item() - 20.4125) < 1e-2
            assert abs(result_mean.item() - 0.0266) < 1e-3
        else:
            # CUDA
            assert abs(result_sum.item() - 20.4125) < 1e-2
            assert abs(result_mean.item() - 0.0266) < 1e-3
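    # The three full-loop tests above share one denoising skeleton; a condensed sketch
    # (dummy_model and dummy_sample_deter are provided by SchedulerCommonTest):
    #
    #     scheduler.set_timesteps(self.num_inference_steps)
    #     sample = self.dummy_sample_deter * scheduler.init_noise_sigma
    #     for t in scheduler.timesteps:
    #         model_output = model(scheduler.scale_model_input(sample, t), t)
    #         sample = scheduler.step(model_output, t, sample).prev_sample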
| 147 |
from packaging import version
from .import_utils import is_accelerate_available
if is_accelerate_available():
import accelerate
def apply_forward_hook(method):
    # Calls the model's `_hf_hook.pre_forward` (if any) before the wrapped method runs.
    if not is_accelerate_available():
        return method
    accelerate_version = version.parse(accelerate.__version__).base_version
    if version.parse(accelerate_version) < version.parse("0.17.0"):
        return method

    def wrapper(self, *args, **kwargs):
        if hasattr(self, "_hf_hook") and hasattr(self._hf_hook, "pre_forward"):
            self._hf_hook.pre_forward(self)
        return method(self, *args, **kwargs)

    return wrapper
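# A minimal usage sketch (the class and method names are illustrative; in diffusers
# this decorator is applied to methods like a VAE's `decode`):
#
#     class MyPipelineModule:
#         @apply_forward_hook  # triggers self._hf_hook.pre_forward on accelerate >= 0.17
#         def decode(self, latents):
#             ...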
| 147 | 1 |
'''simple docstring'''
_lowerCAmelCase : Dict = "ABCDEFGHIJKLMNOPQRSTUVWXYZ"
def _A ( ):
snake_case__ : Dict = input('''Enter message: ''' )
snake_case__ : Union[str, Any] = input('''Enter key [alphanumeric]: ''' )
snake_case__ : List[Any] = input('''Encrypt/Decrypt [e/d]: ''' )
if mode.lower().startswith('''e''' ):
snake_case__ : List[Any] = '''encrypt'''
snake_case__ : List[str] = encrypt_message(snake_case__ , snake_case__ )
elif mode.lower().startswith('''d''' ):
snake_case__ : List[Any] = '''decrypt'''
snake_case__ : str = decrypt_message(snake_case__ , snake_case__ )
print(f'''\n{mode.title()}ed message:''' )
print(snake_case__ )
def _A ( snake_case__ : str , snake_case__ : str ):
return translate_message(snake_case__ , snake_case__ , '''encrypt''' )
def _A ( snake_case__ : str , snake_case__ : str ):
return translate_message(snake_case__ , snake_case__ , '''decrypt''' )
def _A ( snake_case__ : str , snake_case__ : str , snake_case__ : str ):
snake_case__ : Optional[int] = []
snake_case__ : Union[str, Any] = 0
snake_case__ : str = key.upper()
for symbol in message:
snake_case__ : Tuple = LETTERS.find(symbol.upper() )
if num != -1:
if mode == "encrypt":
num += LETTERS.find(key[key_index] )
elif mode == "decrypt":
num -= LETTERS.find(key[key_index] )
num %= len(snake_case__ )
if symbol.isupper():
translated.append(LETTERS[num] )
elif symbol.islower():
translated.append(LETTERS[num].lower() )
key_index += 1
if key_index == len(snake_case__ ):
snake_case__ : List[str] = 0
else:
translated.append(snake_case__ )
return "".join(snake_case__ )
if __name__ == "__main__":
main()
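# Worked example (classic Vigenère test vector):
#     encrypt_message("KEY", "HELLO") -> "RIJVS"
#     decrypt_message("KEY", "RIJVS") -> "HELLO"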
| 711 |
'''simple docstring'''
def equation(x: float) -> float:
    return 10 - x * x


def bisection(a: float, b: float) -> float:
    # Bolzano's theorem: a root exists between a and b only if f(a) and f(b) differ in sign
    if equation(a) * equation(b) >= 0:
        raise ValueError("Wrong space!")

    c = a
    while (b - a) >= 0.01:
        # Find middle point
        c = (a + b) / 2
        # Check if middle point is root
        if equation(c) == 0.0:
            break
        # Decide the side to repeat the steps
        if equation(c) * equation(a) < 0:
            b = c
        else:
            a = c
    return c
if __name__ == "__main__":
import doctest
doctest.testmod()
print(bisection(-2, 5))
print(bisection(0, 6))
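    # Both calls converge on a root of 10 - x**2, i.e. sqrt(10) ~= 3.1623; the loop
    # stops once the bracket [a, b] is narrower than 0.01.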
| 694 | 0 |
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...models.auto.modeling_auto import MODEL_FOR_CAUSAL_LM_MAPPING_NAMES
from ...utils import logging
from ..auto import CONFIG_MAPPING
logger = logging.get_logger(__name__)

BLIP_2_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"""salesforce/blip2-opt-2.7b""": """https://huggingface.co/salesforce/blip2-opt-2.7b/resolve/main/config.json""",
}
class Blip2VisionConfig(PretrainedConfig):
    model_type = "blip_2_vision_model"

    def __init__(
        self,
        hidden_size=1408,
        intermediate_size=6144,
        num_hidden_layers=39,
        num_attention_heads=16,
        image_size=224,
        patch_size=14,
        hidden_act="gelu",
        layer_norm_eps=0.00001,
        attention_dropout=0.0,
        initializer_range=1e-10,
        qkv_bias=True,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.patch_size = patch_size
        self.image_size = image_size
        self.initializer_range = initializer_range
        self.attention_dropout = attention_dropout
        self.layer_norm_eps = layer_norm_eps
        self.hidden_act = hidden_act
        self.qkv_bias = qkv_bias

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs) -> "PretrainedConfig":
        cls._set_token_in_kwargs(kwargs)

        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)

        # get the vision config dict if we are loading from Blip2Config
        if config_dict.get("model_type") == "blip-2":
            config_dict = config_dict["vision_config"]

        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
            )

        return cls.from_dict(config_dict, **kwargs)
class Blip2QFormerConfig(PretrainedConfig):
    model_type = "blip_2_qformer"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        position_embedding_type="absolute",
        cross_attention_frequency=2,
        encoder_hidden_size=1408,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.cross_attention_frequency = cross_attention_frequency
        self.encoder_hidden_size = encoder_hidden_size

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs) -> "PretrainedConfig":
        cls._set_token_in_kwargs(kwargs)

        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)

        # get the qformer config dict if we are loading from Blip2Config
        if config_dict.get("model_type") == "blip-2":
            config_dict = config_dict["qformer_config"]

        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
            )

        return cls.from_dict(config_dict, **kwargs)
class Blip2Config(PretrainedConfig):
    model_type = "blip-2"
    is_composition = True

    def __init__(self, vision_config=None, qformer_config=None, text_config=None, num_query_tokens=32, **kwargs):
        super().__init__(**kwargs)

        if vision_config is None:
            vision_config = {}
            logger.info("vision_config is None. initializing the Blip2VisionConfig with default values.")

        if qformer_config is None:
            qformer_config = {}
            logger.info("qformer_config is None. Initializing the Blip2QFormerConfig with default values.")

        if text_config is None:
            text_config = {}
            logger.info("text_config is None. Initializing the text config with default values (`OPTConfig`).")

        self.vision_config = Blip2VisionConfig(**vision_config)
        self.qformer_config = Blip2QFormerConfig(**qformer_config)
        text_model_type = text_config["model_type"] if "model_type" in text_config else "opt"
        self.text_config = CONFIG_MAPPING[text_model_type](**text_config)

        self.tie_word_embeddings = self.text_config.tie_word_embeddings
        self.is_encoder_decoder = self.text_config.is_encoder_decoder

        self.num_query_tokens = num_query_tokens
        self.qformer_config.encoder_hidden_size = self.vision_config.hidden_size
        self.use_decoder_only_language_model = self.text_config.model_type in MODEL_FOR_CAUSAL_LM_MAPPING_NAMES
        self.initializer_factor = 1.0
        self.initializer_range = 0.02

    @classmethod
    def from_vision_qformer_text_configs(
        cls,
        vision_config: Blip2VisionConfig,
        qformer_config: Blip2QFormerConfig,
        text_config: PretrainedConfig,
        **kwargs,
    ):
        return cls(
            vision_config=vision_config.to_dict(),
            qformer_config=qformer_config.to_dict(),
            text_config=text_config.to_dict(),
            **kwargs,
        )

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        output["vision_config"] = self.vision_config.to_dict()
        output["qformer_config"] = self.qformer_config.to_dict()
        output["text_config"] = self.text_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
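# A minimal usage sketch (all sub-configs fall back to their defaults):
#
#     config = Blip2Config(num_query_tokens=32)
#     config.vision_config.hidden_size   # 1408, the Blip2VisionConfig default
#     config_dict = config.to_dict()     # nested configs are serialized recursively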
| 559 | import argparse
import gc
import json
import os
import re
import torch
from huggingface_hub import hf_hub_download
from transformers import AutoModelForCausalLM, AutoTokenizer, PreTrainedTokenizerFast, RwkvConfig
from transformers.modeling_utils import WEIGHTS_INDEX_NAME, shard_checkpoint
NUM_HIDDEN_LAYERS_MAPPING = {
"""169M""": 12,
"""430M""": 24,
"""1B5""": 24,
"""3B""": 32,
"""7B""": 32,
"""14B""": 40,
}
HIDEN_SIZE_MAPPING = {
"""169M""": 768,
"""430M""": 1_024,
"""1B5""": 2_048,
"""3B""": 2_560,
"""7B""": 4_096,
"""14B""": 5_120,
}
def convert_state_dict(state_dict):
    state_dict_keys = list(state_dict.keys())
    for name in state_dict_keys:
        weight = state_dict.pop(name)
        # emb -> embedding
        if name.startswith("emb."):
            name = name.replace("emb.", "embeddings.")
        # ln_0 -> pre_ln (only present at block 0)
        if name.startswith("blocks.0.ln0"):
            name = name.replace("blocks.0.ln0", "blocks.0.pre_ln")
        # att -> attention
        name = re.sub(r"blocks\.(\d+)\.att", r"blocks.\1.attention", name)
        # ffn -> feed_forward
        name = re.sub(r"blocks\.(\d+)\.ffn", r"blocks.\1.feed_forward", name)
        # time_mix_k -> time_mix_key
        if name.endswith(".time_mix_k"):
            name = name.replace(".time_mix_k", ".time_mix_key")
        # time_mix_v -> time_mix_value
        if name.endswith(".time_mix_v"):
            name = name.replace(".time_mix_v", ".time_mix_value")
        # time_mix_r -> time_mix_receptance
        if name.endswith(".time_mix_r"):
            name = name.replace(".time_mix_r", ".time_mix_receptance")

        if name != "head.weight":
            name = "rwkv." + name

        state_dict[name] = weight
    return state_dict
def convert_rmkv_checkpoint_to_hf_format(
    repo_id, checkpoint_file, output_dir, size=None, tokenizer_file=None, push_to_hub=False, model_name=None
):
    # 1. If possible, build the tokenizer.
    if tokenizer_file is None:
        print("No `--tokenizer_file` provided, we will use the default tokenizer.")
        vocab_size = 50277
        tokenizer = AutoTokenizer.from_pretrained("EleutherAI/gpt-neox-20b")
    else:
        tokenizer = PreTrainedTokenizerFast(tokenizer_file=tokenizer_file)
        vocab_size = len(tokenizer)
    tokenizer.save_pretrained(output_dir)

    # 2. Build the config
    possible_sizes = list(NUM_HIDDEN_LAYERS_MAPPING.keys())
    if size is None:
        # Try to infer size from the checkpoint name
        for candidate in possible_sizes:
            if candidate in checkpoint_file:
                size = candidate
                break
        if size is None:
            raise ValueError("Could not infer the size, please provide it with the `--size` argument.")
    if size not in possible_sizes:
        raise ValueError(f"`size` should be one of {possible_sizes}, got {size}.")

    config = RwkvConfig(
        vocab_size=vocab_size,
        num_hidden_layers=NUM_HIDDEN_LAYERS_MAPPING[size],
        hidden_size=HIDEN_SIZE_MAPPING[size],
    )
    config.save_pretrained(output_dir)

    # 3. Download model file then convert state_dict
    model_file = hf_hub_download(repo_id, checkpoint_file)
    state_dict = torch.load(model_file, map_location="cpu")
    state_dict = convert_state_dict(state_dict)

    # 4. Split in shards and save
    shards, index = shard_checkpoint(state_dict)
    for shard_file, shard in shards.items():
        torch.save(shard, os.path.join(output_dir, shard_file))

    if index is not None:
        save_index_file = os.path.join(output_dir, WEIGHTS_INDEX_NAME)
        # Save the index as well
        with open(save_index_file, "w", encoding="utf-8") as f:
            content = json.dumps(index, indent=2, sort_keys=True) + "\n"
            f.write(content)

    # 5. Clean up shards (for some reason the files PyTorch saves take the same space as the whole state_dict)
    print(
        "Cleaning up shards. This may error with an OOM error, if this is the case don't worry you still have converted the model."
    )
    shard_files = list(shards.keys())

    del state_dict
    del shards
    gc.collect()

    for shard_file in shard_files:
        state_dict = torch.load(os.path.join(output_dir, shard_file))
        torch.save({k: v.cpu().clone() for k, v in state_dict.items()}, os.path.join(output_dir, shard_file))

    del state_dict
    gc.collect()

    if push_to_hub:
        if model_name is None:
            raise ValueError("Please provide a `model_name` to push the model to the Hub.")
        model = AutoModelForCausalLM.from_pretrained(output_dir)
        model.push_to_hub(model_name, max_shard_size="2GB")
        tokenizer.push_to_hub(model_name)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--repo_id""", default=None, type=str, required=True, help="""Repo ID from which to pull the checkpoint."""
)
parser.add_argument(
"""--checkpoint_file""", default=None, type=str, required=True, help="""Name of the checkpoint file in the repo."""
)
parser.add_argument(
"""--output_dir""", default=None, type=str, required=True, help="""Where to save the converted model."""
)
parser.add_argument(
"""--tokenizer_file""",
default=None,
type=str,
help="""Path to the tokenizer file to use (if not provided, only the model is converted).""",
)
parser.add_argument(
"""--size""",
default=None,
type=str,
help="""Size of the model. Will be inferred from the `checkpoint_file` if not passed.""",
)
parser.add_argument(
"""--push_to_hub""",
action="""store_true""",
help="""Push to the Hub the converted model.""",
)
parser.add_argument(
"""--model_name""",
default=None,
type=str,
help="""Name of the pushed model on the Hub, including the username / organization.""",
)
    args = parser.parse_args()
convert_rmkv_checkpoint_to_hf_format(
args.repo_id,
args.checkpoint_file,
args.output_dir,
size=args.size,
tokenizer_file=args.tokenizer_file,
push_to_hub=args.push_to_hub,
model_name=args.model_name,
)
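# Example invocation (repo and checkpoint names are placeholders):
#
#     python convert_rwkv_checkpoint_to_hf.py \
#         --repo_id <HUB_REPO_WITH_RWKV_WEIGHTS> \
#         --checkpoint_file <CHECKPOINT>.pth \
#         --output_dir ./converted-rwkv \
#         --size 169M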
| 559 | 1 |
from __future__ import annotations
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import numpy as np
import tensorflow as tf
from transformers import (
TF_FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
FlaubertConfig,
TFFlaubertForMultipleChoice,
TFFlaubertForQuestionAnsweringSimple,
TFFlaubertForSequenceClassification,
TFFlaubertForTokenClassification,
TFFlaubertModel,
TFFlaubertWithLMHeadModel,
)
class TFFlaubertModelTester:
    def __init__(self, parent):
        self.parent = parent
        self.batch_size = 13
        self.seq_length = 7
        self.is_training = True
        self.use_input_lengths = True
        self.use_token_type_ids = True
        self.use_labels = True
        self.gelu_activation = True
        self.sinusoidal_embeddings = False
        self.causal = False
        self.asm = False
        self.n_langs = 2
        self.vocab_size = 99
        self.n_special = 0
        self.hidden_size = 32
        self.num_hidden_layers = 2
        self.num_attention_heads = 4
        self.hidden_dropout_prob = 0.1
        self.attention_probs_dropout_prob = 0.1
        self.max_position_embeddings = 512
        self.type_vocab_size = 16
        self.type_sequence_label_size = 2
        self.initializer_range = 0.02
        self.num_labels = 3
        self.num_choices = 4
        self.summary_type = "last"
        self.use_proj = True
        self.scope = None
        self.bos_token_id = 0
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        input_mask = random_attention_mask([self.batch_size, self.seq_length], dtype=tf.float32)

        input_lengths = None
        if self.use_input_lengths:
            input_lengths = (
                ids_tensor([self.batch_size], vocab_size=2) + self.seq_length - 2
            )  # small variation of seq_length

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.n_langs)

        sequence_labels = None
        token_labels = None
        is_impossible_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            is_impossible_labels = ids_tensor([self.batch_size], 2, dtype=tf.float32)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = FlaubertConfig(
vocab_size=self.vocab_size , n_special=self.n_special , emb_dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , gelu_activation=self.gelu_activation , sinusoidal_embeddings=self.sinusoidal_embeddings , asm=self.asm , causal=self.causal , n_langs=self.n_langs , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , summary_type=self.summary_type , use_proj=self.use_proj , bos_token_id=self.bos_token_id , )
return (
config,
input_ids,
token_type_ids,
input_lengths,
sequence_labels,
token_labels,
is_impossible_labels,
choice_labels,
input_mask,
)
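    # Each create_and_check_* helper below builds one task-specific head on top of the
    # FlaubertConfig produced above and asserts the shape of its output logits.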
def lowerCAmelCase__ ( self : int , snake_case_ : Union[str, Any] , snake_case_ : List[Any] , snake_case_ : Union[str, Any] , snake_case_ : List[Any] , snake_case_ : int , snake_case_ : List[str] , snake_case_ : Dict , snake_case_ : Optional[int] , snake_case_ : int , ):
UpperCamelCase_: Dict = TFFlaubertModel(config=UpperCamelCase__ )
UpperCamelCase_: List[str] = {'''input_ids''': input_ids, '''lengths''': input_lengths, '''langs''': token_type_ids}
UpperCamelCase_: Optional[Any] = model(UpperCamelCase__ )
UpperCamelCase_: List[str] = [input_ids, input_mask]
UpperCamelCase_: Optional[int] = model(UpperCamelCase__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def lowerCAmelCase__ ( self : Optional[int] , snake_case_ : List[Any] , snake_case_ : Optional[Any] , snake_case_ : Tuple , snake_case_ : Any , snake_case_ : int , snake_case_ : List[Any] , snake_case_ : List[str] , snake_case_ : Dict , snake_case_ : int , ):
UpperCamelCase_: List[Any] = TFFlaubertWithLMHeadModel(UpperCamelCase__ )
UpperCamelCase_: Optional[int] = {'''input_ids''': input_ids, '''lengths''': input_lengths, '''langs''': token_type_ids}
UpperCamelCase_: Any = model(UpperCamelCase__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def lowerCAmelCase__ ( self : List[str] , snake_case_ : List[Any] , snake_case_ : int , snake_case_ : Tuple , snake_case_ : Optional[Any] , snake_case_ : int , snake_case_ : str , snake_case_ : str , snake_case_ : int , snake_case_ : Dict , ):
UpperCamelCase_: Union[str, Any] = TFFlaubertForQuestionAnsweringSimple(UpperCamelCase__ )
UpperCamelCase_: Dict = {'''input_ids''': input_ids, '''lengths''': input_lengths}
UpperCamelCase_: Optional[Any] = model(UpperCamelCase__ )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def lowerCAmelCase__ ( self : str , snake_case_ : Any , snake_case_ : Dict , snake_case_ : List[Any] , snake_case_ : Any , snake_case_ : List[str] , snake_case_ : Tuple , snake_case_ : Tuple , snake_case_ : Optional[int] , snake_case_ : Tuple , ):
UpperCamelCase_: Any = TFFlaubertForSequenceClassification(UpperCamelCase__ )
UpperCamelCase_: List[str] = {'''input_ids''': input_ids, '''lengths''': input_lengths}
UpperCamelCase_: Any = model(UpperCamelCase__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def lowerCAmelCase__ ( self : List[str] , snake_case_ : Optional[Any] , snake_case_ : Dict , snake_case_ : str , snake_case_ : List[Any] , snake_case_ : Optional[Any] , snake_case_ : Union[str, Any] , snake_case_ : Union[str, Any] , snake_case_ : List[str] , snake_case_ : int , ):
UpperCamelCase_: List[str] = self.num_labels
UpperCamelCase_: Optional[int] = TFFlaubertForTokenClassification(config=UpperCamelCase__ )
UpperCamelCase_: Union[str, Any] = {'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids}
UpperCamelCase_: Union[str, Any] = model(UpperCamelCase__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def lowerCAmelCase__ ( self : str , snake_case_ : Union[str, Any] , snake_case_ : Optional[Any] , snake_case_ : Dict , snake_case_ : List[str] , snake_case_ : Optional[int] , snake_case_ : int , snake_case_ : Any , snake_case_ : Tuple , snake_case_ : Any , ):
UpperCamelCase_: List[Any] = self.num_choices
UpperCamelCase_: int = TFFlaubertForMultipleChoice(config=UpperCamelCase__ )
UpperCamelCase_: Optional[Any] = tf.tile(tf.expand_dims(UpperCamelCase__ , 1 ) , (1, self.num_choices, 1) )
UpperCamelCase_: Dict = tf.tile(tf.expand_dims(UpperCamelCase__ , 1 ) , (1, self.num_choices, 1) )
UpperCamelCase_: Optional[Any] = tf.tile(tf.expand_dims(UpperCamelCase__ , 1 ) , (1, self.num_choices, 1) )
UpperCamelCase_: int = {
'''input_ids''': multiple_choice_inputs_ids,
'''attention_mask''': multiple_choice_input_mask,
'''token_type_ids''': multiple_choice_token_type_ids,
}
UpperCamelCase_: Optional[int] = model(UpperCamelCase__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
    def prepare_config_and_inputs_for_common( self ):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_lengths,
            sequence_labels,
            token_labels,
            is_impossible_labels,
            choice_labels,
            input_mask,
        ) = config_and_inputs
        inputs_dict = {
'''input_ids''': input_ids,
'''token_type_ids''': token_type_ids,
'''langs''': token_type_ids,
'''lengths''': input_lengths,
}
return config, inputs_dict
@require_tf
class TFFlaubertModelTest ( TFModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
'''simple docstring'''
all_model_classes = (
(
TFFlaubertModel,
TFFlaubertWithLMHeadModel,
TFFlaubertForSequenceClassification,
TFFlaubertForQuestionAnsweringSimple,
TFFlaubertForTokenClassification,
TFFlaubertForMultipleChoice,
)
if is_tf_available()
else ()
)
all_generative_model_classes = (
(TFFlaubertWithLMHeadModel,) if is_tf_available() else ()
) # TODO (PVP): Check other models whether language generation is also applicable
pipeline_model_mapping = (
{
"""feature-extraction""": TFFlaubertModel,
"""fill-mask""": TFFlaubertWithLMHeadModel,
"""question-answering""": TFFlaubertForQuestionAnsweringSimple,
"""text-classification""": TFFlaubertForSequenceClassification,
"""token-classification""": TFFlaubertForTokenClassification,
"""zero-shot""": TFFlaubertForSequenceClassification,
}
if is_tf_available()
else {}
)
test_head_masking = False
test_onnx = False
def is_pipeline_test_to_skip( self , pipeline_test_casse_name , config_class , model_architecture , tokenizer_name , processor_name ):
if (
pipeline_test_casse_name == "QAPipelineTests"
and tokenizer_name is not None
and not tokenizer_name.endswith("""Fast""" )
):
# `QAPipelineTests` fails for a few models when the slower tokenizer are used.
# (The slower tokenizers were never used for pipeline tests before the pipeline testing rework)
# TODO: check (and possibly fix) the `QAPipelineTests` with slower tokenizer
return True
return False
    def setUp( self ):
        self.model_tester = TFFlaubertModelTester(self )
        self.config_tester = ConfigTester(self , config_class=FlaubertConfig , emb_dim=37 )
def lowerCAmelCase__ ( self : int ):
self.config_tester.run_common_tests()
def lowerCAmelCase__ ( self : int ):
UpperCamelCase_: Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_model(*UpperCamelCase__ )
def lowerCAmelCase__ ( self : List[Any] ):
UpperCamelCase_: Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_lm_head(*UpperCamelCase__ )
def lowerCAmelCase__ ( self : Any ):
UpperCamelCase_: Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_qa(*UpperCamelCase__ )
def lowerCAmelCase__ ( self : Union[str, Any] ):
UpperCamelCase_: List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_sequence_classif(*UpperCamelCase__ )
def lowerCAmelCase__ ( self : str ):
UpperCamelCase_: Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_for_token_classification(*UpperCamelCase__ )
def lowerCAmelCase__ ( self : str ):
UpperCamelCase_: str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_for_multiple_choice(*UpperCamelCase__ )
@slow
def lowerCAmelCase__ ( self : Optional[int] ):
for model_name in TF_FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
model = TFFlaubertModel.from_pretrained(model_name )
self.assertIsNotNone(model )
@require_tf
@require_sentencepiece
@require_tokenizers
class TFFlaubertModelIntegrationTest ( unittest.TestCase ):
'''simple docstring'''
@slow
def lowerCAmelCase__ ( self : Optional[Any] ):
model = TFFlaubertModel.from_pretrained("""jplu/tf-flaubert-small-cased""" )
input_ids = tf.convert_to_tensor(
[[0, 158, 735, 2592, 1424, 6727, 82, 1]] , dtype=tf.int32 , ) # "J'aime flaubert !"
output = model(input_ids )[0]
expected_shape = tf.TensorShape((1, 8, 512) )
self.assertEqual(output.shape , expected_shape )
# compare the actual values for a slice.
expected_slice = tf.convert_to_tensor(
[
[
[-1.876_8773, -1.56_6555, 0.2707_2418],
[-1.692_0038, -0.587_3505, 1.932_9599],
[-2.956_3985, -1.699_3835, 1.797_2052],
]
] , dtype=tf.float32 , )
self.assertTrue(np.allclose(output[:, :3, :3].numpy() , expected_slice.numpy() , atol=1e-4 ) )
| 712 |
class Node :
'''simple docstring'''
    def __init__( self , data , previous=None , next_node=None ):
        self.data = data
        self.previous = previous
        self.next = next_node
def __str__( self : Dict ):
return f'''{self.data}'''
    def get_data( self ):
        return self.data
    def get_next( self ):
        return self.next
    def get_previous( self ):
        return self.previous
class LinkedListIterator :
'''simple docstring'''
    def __init__( self , head ):
        self.current = head
def __iter__( self : Union[str, Any] ):
return self
    def __next__( self ):
        if not self.current:
            raise StopIteration
        else:
            value = self.current.get_data()
            self.current = self.current.get_next()
            return value
class LinkedList :
'''simple docstring'''
    def __init__( self ):
        self.head = None  # First node in list
        self.tail = None  # Last node in list
    def __str__( self ):
        current = self.head
        nodes = []
        while current is not None:
            nodes.append(current.get_data() )
            current = current.get_next()
        return " ".join(str(node ) for node in nodes )
    def __contains__( self , value ):
        current = self.head
        while current:
            if current.get_data() == value:
                return True
            current = current.get_next()
        return False
def __iter__( self : Any ):
return LinkedListIterator(self.head )
    def get_head_data( self ):
        if self.head:
            return self.head.get_data()
        return None
    def get_tail_data( self ):
        if self.tail:
            return self.tail.get_data()
        return None
    def set_head( self , node: Node ):
        if self.head is None:
            self.head = node
            self.tail = node
        else:
            self.insert_before_node(self.head , node )
    def set_tail( self , node: Node ):
        if self.head is None:
            self.set_head(node )
        else:
            self.insert_after_node(self.tail , node )
    def insert( self , value: int ):
        node = Node(value )
        if self.head is None:
            self.set_head(node )
        else:
            self.set_tail(node )
    def insert_before_node( self , node: Node , node_to_insert: Node ):
        node_to_insert.next = node
        node_to_insert.previous = node.previous
        if node.get_previous() is None:
            self.head = node_to_insert
        else:
            node.previous.next = node_to_insert
        node.previous = node_to_insert
    def insert_after_node( self , node: Node , node_to_insert: Node ):
        node_to_insert.previous = node
        node_to_insert.next = node.next
        if node.get_next() is None:
            self.tail = node_to_insert
        else:
            node.next.previous = node_to_insert
        node.next = node_to_insert
    def insert_at_position( self , position: int , value: int ):
        current_position = 1
        new_node = Node(value )
        node = self.head
        while node:
            if current_position == position:
                self.insert_before_node(node , new_node )
                return
            current_position += 1
            node = node.next
        self.insert_after_node(self.tail , new_node )
    def get_node( self , item: int ):
        node = self.head
        while node:
            if node.get_data() == item:
                return node
            node = node.get_next()
        raise Exception("""Node not found""" )
    def delete_value( self , value ):
        if (node := self.get_node(value )) is not None:
            if node == self.head:
                self.head = self.head.get_next()
            if node == self.tail:
                self.tail = self.tail.get_previous()
            self.remove_node_pointers(node )
@staticmethod
    def remove_node_pointers( node: Node ):
        if node.get_next():
            node.next.previous = node.previous
        if node.get_previous():
            node.previous.next = node.next
        node.previous = None
        node.next = None
    def is_empty( self ):
        return self.head is None
def A__ ( ) -> None:
pass
if __name__ == "__main__":
import doctest
doctest.testmod()
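# Quick sanity sketch using the classes above (a minimal illustration, not an
# exhaustive test):
#
#     >>> ll = LinkedList()
#     >>> str(ll)
#     ''
#     >>> ll.insert(1)
#     >>> ll.insert(2)
#     >>> str(ll)
#     '1 2'
#     >>> 2 in ll
#     True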
| 670 | 0 |
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import CLIPTokenizer, CLIPTokenizerFast
from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import CLIPSegProcessor, ViTImageProcessor
@require_vision
class CLIPSegProcessorTest ( unittest.TestCase ):
'''simple docstring'''
def setUp( self ):
'''simple docstring'''
self.tmpdirname = tempfile.mkdtemp()
# fmt: off
__lowercase = ["l", "o", "w", "e", "r", "s", "t", "i", "d", "n", "lo", "l</w>", "w</w>", "r</w>", "t</w>", "low</w>", "er</w>", "lowest</w>", "newer</w>", "wider", "<unk>", "<|startoftext|>", "<|endoftext|>"]
# fmt: on
vocab_tokens = dict(zip(vocab , range(len(vocab ) ) ) )
merges = ["#version: 0.2", "l o", "lo w</w>", "e r</w>", ""]
self.special_tokens_map = {"unk_token": "<unk>"}
self.vocab_file = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] )
self.merges_file = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["merges_file"] )
with open(self.vocab_file , "w" , encoding="utf-8" ) as fp:
    fp.write(json.dumps(vocab_tokens ) + "\n" )
with open(self.merges_file , "w" , encoding="utf-8" ) as fp:
    fp.write("\n".join(merges ) )
image_processor_map = {
"do_resize": True,
"size": 20,
"do_center_crop": True,
"crop_size": 18,
"do_normalize": True,
"image_mean": [0.4814_5466, 0.457_8275, 0.4082_1073],
"image_std": [0.2686_2954, 0.2613_0258, 0.2757_7711],
}
self.image_processor_file = os.path.join(self.tmpdirname , IMAGE_PROCESSOR_NAME )
with open(self.image_processor_file , "w" , encoding="utf-8" ) as fp:
    json.dump(image_processor_map , fp )
    def get_tokenizer( self , **kwargs ):
        '''simple docstring'''
        return CLIPTokenizer.from_pretrained(self.tmpdirname , **kwargs )
    def get_rust_tokenizer( self , **kwargs ):
        '''simple docstring'''
        return CLIPTokenizerFast.from_pretrained(self.tmpdirname , **kwargs )
    def get_image_processor( self , **kwargs ):
        '''simple docstring'''
        return ViTImageProcessor.from_pretrained(self.tmpdirname , **kwargs )
def tearDown( self ):
'''simple docstring'''
shutil.rmtree(self.tmpdirname )
def _snake_case ( self : Union[str, Any] ):
'''simple docstring'''
image_inputs = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uint8 )]
image_inputs = [Image.fromarray(np.moveaxis(x , 0 , -1 ) ) for x in image_inputs]
return image_inputs
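# prepare_image_inputs hands back PIL images, so the processor and the bare image
# processor receive identical, unprocessed inputs in the comparison tests below.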
def _snake_case ( self : Tuple ):
'''simple docstring'''
__lowercase = self.get_tokenizer()
__lowercase = self.get_rust_tokenizer()
__lowercase = self.get_image_processor()
__lowercase = CLIPSegProcessor(tokenizer=lowerCamelCase , image_processor=lowerCamelCase )
processor_slow.save_pretrained(self.tmpdirname )
__lowercase = CLIPSegProcessor.from_pretrained(self.tmpdirname , use_fast=lowerCamelCase )
__lowercase = CLIPSegProcessor(tokenizer=lowerCamelCase , image_processor=lowerCamelCase )
processor_fast.save_pretrained(self.tmpdirname )
__lowercase = CLIPSegProcessor.from_pretrained(self.tmpdirname )
self.assertEqual(processor_slow.tokenizer.get_vocab() , tokenizer_slow.get_vocab() )
self.assertEqual(processor_fast.tokenizer.get_vocab() , tokenizer_fast.get_vocab() )
self.assertEqual(tokenizer_slow.get_vocab() , tokenizer_fast.get_vocab() )
self.assertIsInstance(processor_slow.tokenizer , lowerCamelCase )
self.assertIsInstance(processor_fast.tokenizer , lowerCamelCase )
self.assertEqual(processor_slow.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertEqual(processor_fast.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertIsInstance(processor_slow.image_processor , lowerCamelCase )
self.assertIsInstance(processor_fast.image_processor , lowerCamelCase )
def _snake_case ( self : str ):
'''simple docstring'''
__lowercase = CLIPSegProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
__lowercase = self.get_tokenizer(bos_token="(BOS)" , eos_token="(EOS)" )
__lowercase = self.get_image_processor(do_normalize=lowerCamelCase , padding_value=1.0 )
__lowercase = CLIPSegProcessor.from_pretrained(
self.tmpdirname , bos_token="(BOS)" , eos_token="(EOS)" , do_normalize=lowerCamelCase , padding_value=1.0 )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer , lowerCamelCase )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , lowerCamelCase )
def _snake_case ( self : Union[str, Any] ):
'''simple docstring'''
__lowercase = self.get_image_processor()
__lowercase = self.get_tokenizer()
__lowercase = CLIPSegProcessor(tokenizer=lowerCamelCase , image_processor=lowerCamelCase )
__lowercase = self.prepare_image_inputs()
__lowercase = image_processor(lowerCamelCase , return_tensors="np" )
__lowercase = processor(images=lowerCamelCase , return_tensors="np" )
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1e-2 )
def _snake_case ( self : int ):
'''simple docstring'''
__lowercase = self.get_image_processor()
__lowercase = self.get_tokenizer()
__lowercase = CLIPSegProcessor(tokenizer=lowerCamelCase , image_processor=lowerCamelCase )
__lowercase = "lower newer"
__lowercase = processor(text=lowerCamelCase )
__lowercase = tokenizer(lowerCamelCase )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
def _snake_case ( self : Dict ):
'''simple docstring'''
__lowercase = self.get_image_processor()
__lowercase = self.get_tokenizer()
__lowercase = CLIPSegProcessor(tokenizer=lowerCamelCase , image_processor=lowerCamelCase )
__lowercase = "lower newer"
__lowercase = self.prepare_image_inputs()
__lowercase = processor(text=lowerCamelCase , images=lowerCamelCase )
self.assertListEqual(list(inputs.keys() ) , ["input_ids", "attention_mask", "pixel_values"] )
# test if it raises when no input is passed
with pytest.raises(lowerCamelCase ):
processor()
def _snake_case ( self : Any ):
'''simple docstring'''
__lowercase = self.get_image_processor()
__lowercase = self.get_tokenizer()
__lowercase = CLIPSegProcessor(tokenizer=lowerCamelCase , image_processor=lowerCamelCase )
__lowercase = self.prepare_image_inputs()
__lowercase = self.prepare_image_inputs()
__lowercase = processor(images=lowerCamelCase , visual_prompt=lowerCamelCase )
self.assertListEqual(list(inputs.keys() ) , ["pixel_values", "conditional_pixel_values"] )
# test if it raises when no input is passed
with pytest.raises(lowerCamelCase ):
processor()
def _snake_case ( self : Union[str, Any] ):
'''simple docstring'''
__lowercase = self.get_image_processor()
__lowercase = self.get_tokenizer()
__lowercase = CLIPSegProcessor(tokenizer=lowerCamelCase , image_processor=lowerCamelCase )
__lowercase = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
__lowercase = processor.batch_decode(lowerCamelCase )
__lowercase = tokenizer.batch_decode(lowerCamelCase )
self.assertListEqual(lowerCamelCase , lowerCamelCase )
| 402 |
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import cached_download, hf_hub_url
from PIL import Image
from transformers import DPTConfig, DPTForDepthEstimation, DPTForSemanticSegmentation, DPTImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def get_dpt_config( checkpoint_url ):
    config = DPTConfig(embedding_type="hybrid" )
if "large" in checkpoint_url:
__lowercase = 1_0_2_4
__lowercase = 4_0_9_6
__lowercase = 2_4
__lowercase = 1_6
__lowercase = [5, 1_1, 1_7, 2_3]
__lowercase = [2_5_6, 5_1_2, 1_0_2_4, 1_0_2_4]
expected_shape = (1, 384, 384)
if "nyu" or "midas" in checkpoint_url:
__lowercase = 7_6_8
__lowercase = [1, 1, 1, 0.5]
__lowercase = [2_5_6, 5_1_2, 7_6_8, 7_6_8]
__lowercase = 1_5_0
__lowercase = 1_6
expected_shape = (1, 384, 384)
__lowercase = False
__lowercase = "project"
if "ade" in checkpoint_url:
__lowercase = True
__lowercase = 7_6_8
__lowercase = [1, 1, 1, 0.5]
__lowercase = 1_5_0
__lowercase = 1_6
__lowercase = "huggingface/label-files"
__lowercase = "ade20k-id2label.json"
__lowercase = json.load(open(cached_download(hf_hub_url(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , repo_type="dataset" ) ) , "r" ) )
__lowercase = {int(_SCREAMING_SNAKE_CASE ): v for k, v in idalabel.items()}
__lowercase = idalabel
__lowercase = {v: k for k, v in idalabel.items()}
__lowercase = [1, 1_5_0, 4_8_0, 4_8_0]
return config, expected_shape
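# The original MiDaS/DPT checkpoints use timm-style parameter names; rename_key below
# maps each naming pattern onto the Hugging Face DPT module hierarchy one substring
# at a time, so the order of the checks matters.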
def remove_ignore_keys_( state_dict ):
    ignore_keys = ["pretrained.model.head.weight", "pretrained.model.head.bias"]
    for k in ignore_keys:
        state_dict.pop(k , None )
def rename_key( name ):
    if (
        "pretrained.model" in name
        and "cls_token" not in name
        and "pos_embed" not in name
        and "patch_embed" not in name
    ):
        name = name.replace("pretrained.model" , "dpt.encoder" )
    if "pretrained.model" in name:
        name = name.replace("pretrained.model" , "dpt.embeddings" )
    if "patch_embed" in name:
        name = name.replace("patch_embed" , "" )
    if "pos_embed" in name:
        name = name.replace("pos_embed" , "position_embeddings" )
    if "attn.proj" in name:
        name = name.replace("attn.proj" , "attention.output.dense" )
    if "proj" in name and "project" not in name:
        name = name.replace("proj" , "projection" )
    if "blocks" in name:
        name = name.replace("blocks" , "layer" )
    if "mlp.fc1" in name:
        name = name.replace("mlp.fc1" , "intermediate.dense" )
    if "mlp.fc2" in name:
        name = name.replace("mlp.fc2" , "output.dense" )
    if "norm1" in name and "backbone" not in name:
        name = name.replace("norm1" , "layernorm_before" )
    if "norm2" in name and "backbone" not in name:
        name = name.replace("norm2" , "layernorm_after" )
    if "scratch.output_conv" in name:
        name = name.replace("scratch.output_conv" , "head" )
    if "scratch" in name:
        name = name.replace("scratch" , "neck" )
    if "layer1_rn" in name:
        name = name.replace("layer1_rn" , "convs.0" )
    if "layer2_rn" in name:
        name = name.replace("layer2_rn" , "convs.1" )
    if "layer3_rn" in name:
        name = name.replace("layer3_rn" , "convs.2" )
    if "layer4_rn" in name:
        name = name.replace("layer4_rn" , "convs.3" )
    if "refinenet" in name:
        layer_idx = int(name[len("neck.refinenet" ) : len("neck.refinenet" ) + 1] )
        # tricky here: we need to map 4 to 0, 3 to 1, 2 to 2 and 1 to 3
        name = name.replace(F"""refinenet{layer_idx}""" , F"""fusion_stage.layers.{abs(layer_idx-4 )}""" )
    if "out_conv" in name:
        name = name.replace("out_conv" , "projection" )
    if "resConfUnit1" in name:
        name = name.replace("resConfUnit1" , "residual_layer1" )
    if "resConfUnit2" in name:
        name = name.replace("resConfUnit2" , "residual_layer2" )
    if "conv1" in name:
        name = name.replace("conv1" , "convolution1" )
    if "conv2" in name:
        name = name.replace("conv2" , "convolution2" )
    # readout blocks
    if "pretrained.act_postprocess1.0.project.0" in name:
        name = name.replace("pretrained.act_postprocess1.0.project.0" , "neck.reassemble_stage.readout_projects.0.0" )
    if "pretrained.act_postprocess2.0.project.0" in name:
        name = name.replace("pretrained.act_postprocess2.0.project.0" , "neck.reassemble_stage.readout_projects.1.0" )
    if "pretrained.act_postprocess3.0.project.0" in name:
        name = name.replace("pretrained.act_postprocess3.0.project.0" , "neck.reassemble_stage.readout_projects.2.0" )
    if "pretrained.act_postprocess4.0.project.0" in name:
        name = name.replace("pretrained.act_postprocess4.0.project.0" , "neck.reassemble_stage.readout_projects.3.0" )
    # resize blocks
    if "pretrained.act_postprocess1.3" in name:
        name = name.replace("pretrained.act_postprocess1.3" , "neck.reassemble_stage.layers.0.projection" )
    if "pretrained.act_postprocess1.4" in name:
        name = name.replace("pretrained.act_postprocess1.4" , "neck.reassemble_stage.layers.0.resize" )
    if "pretrained.act_postprocess2.3" in name:
        name = name.replace("pretrained.act_postprocess2.3" , "neck.reassemble_stage.layers.1.projection" )
    if "pretrained.act_postprocess2.4" in name:
        name = name.replace("pretrained.act_postprocess2.4" , "neck.reassemble_stage.layers.1.resize" )
    if "pretrained.act_postprocess3.3" in name:
        name = name.replace("pretrained.act_postprocess3.3" , "neck.reassemble_stage.layers.2.projection" )
    if "pretrained.act_postprocess4.3" in name:
        name = name.replace("pretrained.act_postprocess4.3" , "neck.reassemble_stage.layers.3.projection" )
    if "pretrained.act_postprocess4.4" in name:
        name = name.replace("pretrained.act_postprocess4.4" , "neck.reassemble_stage.layers.3.resize" )
    if "pretrained" in name:
        name = name.replace("pretrained" , "dpt" )
    if "bn" in name:
        name = name.replace("bn" , "batch_norm" )
    if "head" in name:
        name = name.replace("head" , "head.head" )
    if "encoder.norm" in name:
        name = name.replace("encoder.norm" , "layernorm" )
    if "auxlayer" in name:
        name = name.replace("auxlayer" , "auxiliary_head.head" )
    if "backbone" in name:
        name = name.replace("backbone" , "backbone.bit.encoder" )
    if ".." in name:
        name = name.replace(".." , "." )
    if "stem.conv" in name:
        name = name.replace("stem.conv" , "bit.embedder.convolution" )
    if "blocks" in name:
        name = name.replace("blocks" , "layers" )
    if "convolution" in name and "backbone" in name:
        name = name.replace("convolution" , "conv" )
    if "layer" in name and "backbone" in name:
        name = name.replace("layer" , "layers" )
    if "backbone.bit.encoder.bit" in name:
        name = name.replace("backbone.bit.encoder.bit" , "backbone.bit" )
    if "embedder.conv" in name:
        name = name.replace("embedder.conv" , "embedder.convolution" )
    if "backbone.bit.encoder.stem.norm" in name:
        name = name.replace("backbone.bit.encoder.stem.norm" , "backbone.bit.embedder.norm" )
    return name
def read_in_q_k_v( state_dict , config ):
    for i in range(config.num_hidden_layers ):
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"""dpt.encoder.layer.{i}.attn.qkv.weight""" )
        in_proj_bias = state_dict.pop(f"""dpt.encoder.layer.{i}.attn.qkv.bias""" )
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"""dpt.encoder.layer.{i}.attention.attention.query.weight"""] = in_proj_weight[: config.hidden_size, :]
        state_dict[f"""dpt.encoder.layer.{i}.attention.attention.query.bias"""] = in_proj_bias[: config.hidden_size]
        state_dict[f"""dpt.encoder.layer.{i}.attention.attention.key.weight"""] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[f"""dpt.encoder.layer.{i}.attention.attention.key.bias"""] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[f"""dpt.encoder.layer.{i}.attention.attention.value.weight"""] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[f"""dpt.encoder.layer.{i}.attention.attention.value.bias"""] = in_proj_bias[-config.hidden_size :]
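# The timm checkpoint fuses query, key and value into one (3 * hidden, hidden) qkv
# matrix; the slices above peel them off in q, k, v order. A minimal shape sketch
# (the hidden size value is illustrative):
#
#     import torch
#     hidden = 4
#     qkv = torch.randn(3 * hidden, hidden)
#     q, k, v = qkv[:hidden], qkv[hidden : 2 * hidden], qkv[-hidden:]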
def prepare_img( ):
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url , stream=True ).raw )
    return im
@torch.no_grad()
def convert_dpt_checkpoint( checkpoint_url , pytorch_dump_folder_path , push_to_hub , model_name , show_prediction ):
    config , expected_shape = get_dpt_config(checkpoint_url )
    # load original state_dict from URL
    # state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")
    state_dict = torch.load(checkpoint_url , map_location="cpu" )
    # remove certain keys
    remove_ignore_keys_(state_dict )
    # rename keys
    for key in state_dict.copy().keys():
        val = state_dict.pop(key )
        state_dict[rename_key(key )] = val
    # read in qkv matrices
    read_in_q_k_v(state_dict , config )
    # load HuggingFace model
    model = DPTForSemanticSegmentation(config ) if "ade" in checkpoint_url else DPTForDepthEstimation(config )
    model.load_state_dict(state_dict )
    model.eval()
    # Check outputs on an image
    size = 480 if "ade" in checkpoint_url else 384
    image_processor = DPTImageProcessor(size=size )
    image = prepare_img()
    encoding = image_processor(image , return_tensors="pt" )
    # forward pass
    outputs = model(**encoding ).logits if "ade" in checkpoint_url else model(**encoding ).predicted_depth
    if show_prediction:
        prediction = (
            torch.nn.functional.interpolate(
                outputs.unsqueeze(1 ) , size=(image.size[1], image.size[0]) , mode="bicubic" , align_corners=False , )
            .squeeze()
            .cpu()
            .numpy()
        )
        Image.fromarray((prediction / prediction.max()) * 255 ).show()
    if pytorch_dump_folder_path is not None:
        Path(pytorch_dump_folder_path ).mkdir(exist_ok=True )
        print(F"""Saving model to {pytorch_dump_folder_path}""" )
        model.save_pretrained(pytorch_dump_folder_path )
        print(F"""Saving image processor to {pytorch_dump_folder_path}""" )
        image_processor.save_pretrained(pytorch_dump_folder_path )
    if push_to_hub:
        model.push_to_hub("ybelkada/dpt-hybrid-midas" )
        image_processor.push_to_hub("ybelkada/dpt-hybrid-midas" )
if __name__ == "__main__":
parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--checkpoint_url""",
default="""https://github.com/intel-isl/DPT/releases/download/1_0/dpt_large-midas-2f21e586.pt""",
type=str,
help="""URL of the original DPT checkpoint you'd like to convert.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""",
default=None,
type=str,
required=False,
help="""Path to the output PyTorch model directory.""",
)
parser.add_argument(
"""--push_to_hub""",
action="""store_true""",
)
parser.add_argument(
"""--model_name""",
default="""dpt-large""",
type=str,
help="""Name of the model, in case you're pushing to the hub.""",
)
parser.add_argument(
"""--show_prediction""",
action="""store_true""",
)
args = parser.parse_args()
convert_dpt_checkpoint(
args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub, args.model_name, args.show_prediction
)
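# Illustrative invocation (the script name and output path are hypothetical; the
# checkpoint URL is the default declared above):
#
#     python convert_dpt_hybrid_to_pytorch.py \
#         --checkpoint_url https://github.com/intel-isl/DPT/releases/download/1_0/dpt_large-midas-2f21e586.pt \
#         --pytorch_dump_folder_path ./dpt-large --show_prediction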
| 402 | 1 |
'''simple docstring'''
from argparse import ArgumentParser
from datasets.commands.convert import ConvertCommand
from datasets.commands.dummy_data import DummyDataCommand
from datasets.commands.env import EnvironmentCommand
from datasets.commands.run_beam import RunBeamCommand
from datasets.commands.test import TestCommand
from datasets.utils.logging import set_verbosity_info
def _UpperCamelCase ( __UpperCamelCase ) -> Optional[int]:
return {key.lstrip('-' ): value for key, value in zip(unknown_args[::2] ,unknown_args[1::2] )}
def _UpperCamelCase ( ) -> int:
lowerCamelCase_ = ArgumentParser(
'HuggingFace Datasets CLI tool' ,usage='datasets-cli <command> [<args>]' ,allow_abbrev=__UpperCamelCase )
lowerCamelCase_ = parser.add_subparsers(help='datasets-cli command helpers' )
set_verbosity_info()
# Register commands
ConvertCommand.register_subcommand(__UpperCamelCase )
EnvironmentCommand.register_subcommand(__UpperCamelCase )
TestCommand.register_subcommand(__UpperCamelCase )
RunBeamCommand.register_subcommand(__UpperCamelCase )
DummyDataCommand.register_subcommand(__UpperCamelCase )
# Parse args
lowerCamelCase_ ,lowerCamelCase_ = parser.parse_known_args()
if not hasattr(__UpperCamelCase ,'func' ):
parser.print_help()
exit(1 )
lowerCamelCase_ = parse_unknown_args(__UpperCamelCase )
# Run
lowerCamelCase_ = args.func(__UpperCamelCase ,**__UpperCamelCase )
service.run()
if __name__ == "__main__":
main()
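# Example invocations once installed as the `datasets-cli` entry point (the dataset
# path is illustrative):
#
#     datasets-cli env
#     datasets-cli test ./my_dataset --save_info --all_configs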
| 384 |
'''simple docstring'''
from __future__ import annotations
from typing import Generic, TypeVar
A_ = TypeVar("T")
class DisjointSetTreeNode ( Generic[T] ):
'''simple docstring'''
def __init__( self , SCREAMING_SNAKE_CASE_ ) -> None:
'''simple docstring'''
lowerCamelCase_ = data
lowerCamelCase_ = self
lowerCamelCase_ = 0
class DisjointSetTree ( Generic[T] ):
'''simple docstring'''
def __init__( self ) -> None:
'''simple docstring'''
lowerCamelCase_ = {}
def UpperCamelCase( self , SCREAMING_SNAKE_CASE_ ) -> None:
'''simple docstring'''
lowerCamelCase_ = DisjointSetTreeNode(SCREAMING_SNAKE_CASE_ )
def UpperCamelCase( self , SCREAMING_SNAKE_CASE_ ) -> DisjointSetTreeNode[T]:
'''simple docstring'''
lowerCamelCase_ = self.map[data]
if elem_ref != elem_ref.parent:
lowerCamelCase_ = self.find_set(elem_ref.parent.data )
return elem_ref.parent
def UpperCamelCase( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> None:
'''simple docstring'''
if nodea.rank > nodea.rank:
lowerCamelCase_ = nodea
else:
lowerCamelCase_ = nodea
if nodea.rank == nodea.rank:
nodea.rank += 1
def UpperCamelCase( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> None:
'''simple docstring'''
self.link(self.find_set(SCREAMING_SNAKE_CASE_ ) , self.find_set(SCREAMING_SNAKE_CASE_ ) )
class GraphUndirectedWeighted ( Generic[T] ):
'''simple docstring'''
def __init__( self ) -> None:
'''simple docstring'''
lowerCamelCase_ = {}
def UpperCamelCase( self , SCREAMING_SNAKE_CASE_ ) -> None:
'''simple docstring'''
if node not in self.connections:
lowerCamelCase_ = {}
def UpperCamelCase( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> None:
'''simple docstring'''
self.add_node(SCREAMING_SNAKE_CASE_ )
self.add_node(SCREAMING_SNAKE_CASE_ )
lowerCamelCase_ = weight
lowerCamelCase_ = weight
def UpperCamelCase( self ) -> GraphUndirectedWeighted[T]:
'''simple docstring'''
lowerCamelCase_ = []
lowerCamelCase_ = set()
for start in self.connections:
for end in self.connections[start]:
if (start, end) not in seen:
seen.add((end, start) )
edges.append((start, end, self.connections[start][end]) )
edges.sort(key=lambda SCREAMING_SNAKE_CASE_ : x[2] )
# creating the disjoint set
lowerCamelCase_ = DisjointSetTree[T]()
for node in self.connections:
disjoint_set.make_set(SCREAMING_SNAKE_CASE_ )
# MST generation
lowerCamelCase_ = 0
lowerCamelCase_ = 0
lowerCamelCase_ = GraphUndirectedWeighted[T]()
while num_edges < len(self.connections ) - 1:
lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ = edges[index]
index += 1
lowerCamelCase_ = disjoint_set.find_set(SCREAMING_SNAKE_CASE_ )
lowerCamelCase_ = disjoint_set.find_set(SCREAMING_SNAKE_CASE_ )
if parent_u != parent_v:
num_edges += 1
graph.add_edge(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
disjoint_set.union(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
return graph
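# Usage sketch for the Kruskal construction above (edge weights are illustrative):
#
#     g = GraphUndirectedWeighted[int]()
#     g.add_edge(1, 2, 1)
#     g.add_edge(2, 3, 2)
#     g.add_edge(1, 3, 10)
#     mst = g.kruskal()   # keeps edges (1, 2) and (2, 3), drops (1, 3)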
| 384 | 1 |
import logging
from transformers import PretrainedConfig
logger = logging.getLogger(__name__)
BERTABS_FINETUNED_CONFIG_ARCHIVE_MAP = {
'bertabs-finetuned-cnndm': 'https://huggingface.co/remi/bertabs-finetuned-cnndm-extractive-abstractive-summarization/resolve/main/config.json',
}
class BertAbsConfig ( PretrainedConfig ):
    model_type = 'bertabs'
    def __init__( self , vocab_size=30522 , max_pos=512 , enc_layers=6 , enc_hidden_size=512 , enc_heads=8 , enc_ff_size=512 , enc_dropout=0.2 , dec_layers=6 , dec_hidden_size=768 , dec_heads=8 , dec_ff_size=2048 , dec_dropout=0.2 , **kwargs , ):
        super().__init__(**kwargs )
        self.vocab_size = vocab_size
        self.max_pos = max_pos
        self.enc_layers = enc_layers
        self.enc_hidden_size = enc_hidden_size
        self.enc_heads = enc_heads
        self.enc_ff_size = enc_ff_size
        self.enc_dropout = enc_dropout
        self.dec_layers = dec_layers
        self.dec_hidden_size = dec_hidden_size
        self.dec_heads = dec_heads
        self.dec_ff_size = dec_ff_size
        self.dec_dropout = dec_dropout
| 471 |
__UpperCAmelCase : int = [sum(int(c, 10) ** 2 for c in i.__str__()) for i in range(100_000)]
def next_number( number ):
    sum_of_digits_squared = 0
    while number:
        # Increase speed slightly by checking every 5 digits together.
        sum_of_digits_squared += DIGITS_SQUARED[number % 10_0000]
        number //= 10_0000
    return sum_of_digits_squared
# There are 2 Chains made,
# One ends with 89 with the chain member 58 being the one which when declared first,
# there will be the least number of iterations for all the members to be checked.
# The other one ends with 1 and has only one element 1.
# So 58 and 1 are chosen to be declared at the starting.
# Changed dictionary to an array to quicken the solution
__UpperCAmelCase : list[bool | None] = [None] * 10_000_000
CHAINS[57] = True  # 58 -> ... -> 89; True marks a chain that reaches 89
CHAINS[0] = False  # 1 -> 1
def chain( number ):
    if CHAINS[number - 1] is not None:
        return CHAINS[number - 1]  # type: ignore
    number_chain = chain(next_number(number ) )
    CHAINS[number - 1] = number_chain
    while number < 1000_0000:
        CHAINS[number - 1] = number_chain
        number *= 10
    return number_chain
def solution( number = 1000_0000 ):
    for i in range(1 , number ):
        if CHAINS[i] is None:
            chain(i + 1 )
    return CHAINS[:number].count(True )
if __name__ == "__main__":
import doctest
doctest.testmod()
print(f'''{solution() = }''')
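# CHAINS memoizes one boolean per starting value, and the `number *= 10` loop in
# chain() propagates each result to 10x, 100x, ... of the same number: appending a
# zero digit leaves the digit-square sum unchanged, so those chains are identical.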
| 471 | 1 |
import itertools
from dataclasses import dataclass
from typing import List, Optional
import pyarrow as pa
import pyarrow.parquet as pq
import datasets
from datasets.table import table_cast
logger = datasets.utils.logging.get_logger(__name__)
@dataclass
class ParquetConfig ( datasets.BuilderConfig ):
    """simple docstring"""
    batch_size = 10_000
    columns = None
    features = None
class Parquet ( datasets.ArrowBasedBuilder ):
    """simple docstring"""
    BUILDER_CONFIG_CLASS = ParquetConfig
def A( self : Dict ) -> Tuple:
'''simple docstring'''
return datasets.DatasetInfo(features=self.config.features )
def A( self : List[str] ,_SCREAMING_SNAKE_CASE : Optional[Any] ) -> List[str]:
'''simple docstring'''
if not self.config.data_files:
raise ValueError(f'At least one data file must be specified, but got data_files={self.config.data_files}' )
A = dl_manager.download_and_extract(self.config.data_files )
if isinstance(_SCREAMING_SNAKE_CASE ,(str, list, tuple) ):
A = data_files
if isinstance(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ):
A = [files]
# Use `dl_manager.iter_files` to skip hidden files in an extracted archive
A = [dl_manager.iter_files(_SCREAMING_SNAKE_CASE ) for file in files]
return [datasets.SplitGenerator(name=datasets.Split.TRAIN ,gen_kwargs={'files': files} )]
A = []
for split_name, files in data_files.items():
if isinstance(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ):
A = [files]
# Use `dl_manager.iter_files` to skip hidden files in an extracted archive
A = [dl_manager.iter_files(_SCREAMING_SNAKE_CASE ) for file in files]
# Infer features is they are stoed in the arrow schema
if self.info.features is None:
for file in itertools.chain.from_iterable(_SCREAMING_SNAKE_CASE ):
with open(_SCREAMING_SNAKE_CASE ,'rb' ) as f:
A = datasets.Features.from_arrow_schema(pq.read_schema(_SCREAMING_SNAKE_CASE ) )
break
splits.append(datasets.SplitGenerator(name=_SCREAMING_SNAKE_CASE ,gen_kwargs={'files': files} ) )
return splits
def A( self : Union[str, Any] ,_SCREAMING_SNAKE_CASE : pa.Table ) -> pa.Table:
'''simple docstring'''
if self.info.features is not None:
# more expensive cast to support nested features with keys in a different order
# allows str <-> int/float or str to Audio for example
A = table_cast(_SCREAMING_SNAKE_CASE ,self.info.features.arrow_schema )
return pa_table
def A( self : Optional[int] ,_SCREAMING_SNAKE_CASE : Tuple ) -> int:
'''simple docstring'''
A = self.info.features.arrow_schema if self.info.features is not None else None
if self.info.features is not None and self.config.columns is not None:
if sorted(field.name for field in schema ) != sorted(self.config.columns ):
raise ValueError(
f'Tried to load parquet data with columns \'{self.config.columns}\' with mismatching features \'{self.info.features}\'' )
for file_idx, file in enumerate(itertools.chain.from_iterable(_SCREAMING_SNAKE_CASE ) ):
with open(_SCREAMING_SNAKE_CASE ,'rb' ) as f:
A = pq.ParquetFile(_SCREAMING_SNAKE_CASE )
try:
for batch_idx, record_batch in enumerate(
parquet_file.iter_batches(batch_size=self.config.batch_size ,columns=self.config.columns ) ):
A = pa.Table.from_batches([record_batch] )
# Uncomment for debugging (will print the Arrow table size and elements)
# logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}")
# logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows)))
yield f'{file_idx}_{batch_idx}', self._cast_table(_SCREAMING_SNAKE_CASE )
except ValueError as e:
logger.error(f'Failed to read file \'{file}\' with error {type(_SCREAMING_SNAKE_CASE )}: {e}' )
raise
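# Sketch of exercising this builder through the public API (file paths are
# illustrative):
#
#     from datasets import load_dataset
#     ds = load_dataset("parquet", data_files={"train": "data/train-00000.parquet"})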
| 110 |
from collections import deque
from .hash_table import HashTable
class HashTableWithLinkedList ( HashTable ):
"""simple docstring"""
def __init__( self : Any ,*_SCREAMING_SNAKE_CASE : Optional[int] ,**_SCREAMING_SNAKE_CASE : Optional[Any] ) -> int:
'''simple docstring'''
super().__init__(*_SCREAMING_SNAKE_CASE ,**_SCREAMING_SNAKE_CASE )
def A( self : Optional[int] ,_SCREAMING_SNAKE_CASE : Optional[Any] ,_SCREAMING_SNAKE_CASE : Tuple ) -> List[str]:
'''simple docstring'''
A = deque([] ) if self.values[key] is None else self.values[key]
self.values[key].appendleft(_SCREAMING_SNAKE_CASE )
A = self.values[key]
def A( self : Optional[Any] ) -> Union[str, Any]:
'''simple docstring'''
return (
sum(self.charge_factor - len(_SCREAMING_SNAKE_CASE ) for slot in self.values )
/ self.size_table
* self.charge_factor
)
def A( self : Dict ,_SCREAMING_SNAKE_CASE : Optional[int] ,_SCREAMING_SNAKE_CASE : str=None ) -> Union[str, Any]:
'''simple docstring'''
if not (
len(self.values[key] ) == self.charge_factor and self.values.count(_SCREAMING_SNAKE_CASE ) == 0
):
return key
return super()._collision_resolution(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE )
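# Collisions chain in a deque at the hashed slot; _collision_resolution only falls
# back to the parent's probing once the deque holds charge_factor items and no
# empty slot (None) is left in the table.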
| 110 | 1 |
import functools
from typing import Any
def _lowerCAmelCase ( string ,words ):
    '''simple docstring'''
    if not isinstance(string ,str ) or len(string ) == 0:
        raise ValueError("""the string should be not empty string""" )
    if not isinstance(words ,list ) or not all(
        isinstance(item ,str ) and len(item ) > 0 for item in words ):
        raise ValueError("""the words should be a list of non-empty strings""" )
    # Build trie
    trie = {}
    word_keeper_key = """WORD_KEEPER"""
    for word in words:
        trie_node = trie
        for c in word:
            if c not in trie_node:
                trie_node[c] = {}
            trie_node = trie_node[c]
        trie_node[word_keeper_key] = True
    len_string = len(string )
# Dynamic programming method
@functools.cache
def is_breakable(_lowerCAmelCase ) -> bool:
if index == len_string:
return True
A_ : Any = trie
for i in range(UpperCAmelCase__ ,UpperCAmelCase__ ):
A_ : Optional[int] = trie_node.get(string[i] ,UpperCAmelCase__ )
if trie_node is None:
return False
if trie_node.get(UpperCAmelCase__ ,UpperCAmelCase__ ) and is_breakable(i + 1 ):
return True
return False
return is_breakable(0 )
if __name__ == "__main__":
import doctest
doctest.testmod()
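# Example of the word-break check above (the word lists are illustrative):
#
#     >>> _lowerCAmelCase("applepenapple", ["apple", "pen"])
#     True
#     >>> _lowerCAmelCase("catsandog", ["cats", "dog", "sand", "and", "cat"])
#     False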
| 569 |
"""simple docstring"""
import logging
import os
import sys
from dataclasses import dataclass, field
from typing import Optional
import torch
from datasets import load_dataset
from torchvision.transforms import Compose, Lambda, Normalize, RandomHorizontalFlip, RandomResizedCrop, ToTensor
from torchvision.transforms.functional import InterpolationMode
import transformers
from transformers import (
HfArgumentParser,
Trainer,
TrainingArguments,
ViTImageProcessor,
ViTMAEConfig,
ViTMAEForPreTraining,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version, send_example_telemetry
from transformers.utils.versions import require_version
logger = logging.getLogger(__name__)
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version("4.31.0")
require_version("datasets>=1.8.0", "To fix: pip install -r examples/pytorch/image-pretraining/requirements.txt")
@dataclass
class A__:
lowerCAmelCase = field(
default='''cifar10''' , metadata={'''help''': '''Name of a dataset from the datasets package'''} )
lowerCAmelCase = field(
default=__magic_name__ , metadata={'''help''': '''The configuration name of the dataset to use (via the datasets library).'''} )
lowerCAmelCase = field(
default=__magic_name__ , metadata={'''help''': '''The column name of the images in the files.'''} )
lowerCAmelCase = field(default=__magic_name__ , metadata={'''help''': '''A folder containing the training data.'''} )
lowerCAmelCase = field(default=__magic_name__ , metadata={'''help''': '''A folder containing the validation data.'''} )
lowerCAmelCase = field(
default=0.15 , metadata={'''help''': '''Percent to split off of train for validation.'''} )
lowerCAmelCase = field(
default=__magic_name__ , metadata={
'''help''': (
'''For debugging purposes or quicker training, truncate the number of training examples to this '''
'''value if set.'''
)
} , )
lowerCAmelCase = field(
default=__magic_name__ , metadata={
'''help''': (
'''For debugging purposes or quicker training, truncate the number of evaluation examples to this '''
'''value if set.'''
)
} , )
def _a ( self : Dict ) -> Tuple:
"""simple docstring"""
__SCREAMING_SNAKE_CASE = {}
if self.train_dir is not None:
__SCREAMING_SNAKE_CASE = self.train_dir
if self.validation_dir is not None:
__SCREAMING_SNAKE_CASE = self.validation_dir
__SCREAMING_SNAKE_CASE = data_files if data_files else None
@dataclass
class A__:
lowerCAmelCase = field(
default=__magic_name__ , metadata={
'''help''': (
'''The model checkpoint for weights initialization.Don\'t set if you want to train a model from scratch.'''
)
} , )
lowerCAmelCase = field(
default=__magic_name__ , metadata={'''help''': '''Pretrained config name or path if not the same as model_name_or_path'''} )
lowerCAmelCase = field(
default=__magic_name__ , metadata={
'''help''': (
'''Override some existing default config settings when a model is trained from scratch. Example: '''
'''n_embd=10,resid_pdrop=0.2,scale_attn_weights=false,summary_type=cls_index'''
)
} , )
lowerCAmelCase = field(
default=__magic_name__ , metadata={'''help''': '''Where do you want to store the pretrained models downloaded from s3'''} )
lowerCAmelCase = field(
default='''main''' , metadata={'''help''': '''The specific model version to use (can be a branch name, tag name or commit id).'''} , )
lowerCAmelCase = field(default=__magic_name__ , metadata={'''help''': '''Name or path of preprocessor config.'''} )
lowerCAmelCase = field(
default=__magic_name__ , metadata={
'''help''': (
'''Will use the token generated when running `huggingface-cli login` (necessary to use this script '''
'''with private models).'''
)
} , )
lowerCAmelCase = field(
default=0.75 , metadata={'''help''': '''The ratio of the number of masked tokens in the input sequence.'''} )
lowerCAmelCase = field(
default=__magic_name__ , metadata={'''help''': '''Whether or not to train with normalized pixel values as target.'''} )
@dataclass
class A__( __magic_name__ ):
lowerCAmelCase = field(
default=1E-3 , metadata={'''help''': '''Base learning rate: absolute_lr = base_lr * total_batch_size / 256.'''} )
def _a ( UpperCAmelCase__ ) -> Optional[Any]:
__SCREAMING_SNAKE_CASE = torch.stack([example['''pixel_values'''] for example in examples] )
return {"pixel_values": pixel_values}
def _a ( ) -> List[Any]:
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
parser = HfArgumentParser((ModelArguments, DataTrainingArguments, CustomTrainingArguments) )
if len(sys.argv ) == 2 and sys.argv[1].endswith('''.json''' ):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
model_args , data_args , training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
else:
model_args , data_args , training_args = parser.parse_args_into_dataclasses()
# Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
# information sent is the one passed as arguments along with your Python/PyTorch versions.
send_example_telemetry('''run_mae''' , UpperCAmelCase__ , UpperCAmelCase__ )
# Setup logging
logging.basicConfig(
format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''' , datefmt='''%m/%d/%Y %H:%M:%S''' , handlers=[logging.StreamHandler(sys.stdout )] , )
if training_args.should_log:
# The default of training_args.log_level is passive, so we set log level at info here to have that default.
transformers.utils.logging.set_verbosity_info()
log_level = training_args.get_process_log_level()
logger.setLevel(log_level )
transformers.utils.logging.set_verbosity(log_level )
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Log on each process the small summary:
logger.warning(
f"""Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}"""
+ f"""distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}""" )
logger.info(f"""Training/evaluation parameters {training_args}""" )
# Detecting last checkpoint.
last_checkpoint = None
if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
last_checkpoint = get_last_checkpoint(training_args.output_dir )
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
f"""Output directory ({training_args.output_dir}) already exists and is not empty. """
'''Use --overwrite_output_dir to overcome.''' )
elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
logger.info(
f"""Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change """
'''the `--output_dir` or add `--overwrite_output_dir` to train from scratch.''' )
# Initialize our dataset.
ds = load_dataset(
data_args.dataset_name , data_args.dataset_config_name , data_files=data_args.data_files , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , )
# If we don't have a validation split, split off a percentage of train as validation.
data_args.train_val_split = None if '''validation''' in ds.keys() else data_args.train_val_split
if isinstance(data_args.train_val_split , float ) and data_args.train_val_split > 0.0:
    split = ds['''train'''].train_test_split(data_args.train_val_split )
    ds['''train'''] = split['''train''']
    ds['''validation'''] = split['''test''']
# Load pretrained model and image processor
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
__SCREAMING_SNAKE_CASE = {
'''cache_dir''': model_args.cache_dir,
'''revision''': model_args.model_revision,
'''use_auth_token''': True if model_args.use_auth_token else None,
}
if model_args.config_name:
__SCREAMING_SNAKE_CASE = ViTMAEConfig.from_pretrained(model_args.config_name , **UpperCAmelCase__ )
elif model_args.model_name_or_path:
__SCREAMING_SNAKE_CASE = ViTMAEConfig.from_pretrained(model_args.model_name_or_path , **UpperCAmelCase__ )
else:
__SCREAMING_SNAKE_CASE = ViTMAEConfig()
logger.warning('''You are instantiating a new config instance from scratch.''' )
if model_args.config_overrides is not None:
logger.info(f"""Overriding config: {model_args.config_overrides}""" )
config.update_from_string(model_args.config_overrides )
logger.info(f"""New config: {config}""" )
# adapt config
config.update(
{
'''mask_ratio''': model_args.mask_ratio,
'''norm_pix_loss''': model_args.norm_pix_loss,
} )
# create image processor
if model_args.image_processor_name:
__SCREAMING_SNAKE_CASE = ViTImageProcessor.from_pretrained(model_args.image_processor_name , **UpperCAmelCase__ )
elif model_args.model_name_or_path:
__SCREAMING_SNAKE_CASE = ViTImageProcessor.from_pretrained(model_args.model_name_or_path , **UpperCAmelCase__ )
else:
__SCREAMING_SNAKE_CASE = ViTImageProcessor()
# create model
if model_args.model_name_or_path:
__SCREAMING_SNAKE_CASE = ViTMAEForPreTraining.from_pretrained(
model_args.model_name_or_path , from_tf=bool('''.ckpt''' in model_args.model_name_or_path ) , config=UpperCAmelCase__ , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
else:
logger.info('''Training new model from scratch''' )
__SCREAMING_SNAKE_CASE = ViTMAEForPreTraining(UpperCAmelCase__ )
if training_args.do_train:
__SCREAMING_SNAKE_CASE = ds['''train'''].column_names
else:
__SCREAMING_SNAKE_CASE = ds['''validation'''].column_names
if data_args.image_column_name is not None:
__SCREAMING_SNAKE_CASE = data_args.image_column_name
elif "image" in column_names:
__SCREAMING_SNAKE_CASE = '''image'''
elif "img" in column_names:
__SCREAMING_SNAKE_CASE = '''img'''
else:
__SCREAMING_SNAKE_CASE = column_names[0]
# transformations as done in original MAE paper
# source: https://github.com/facebookresearch/mae/blob/main/main_pretrain.py
if "shortest_edge" in image_processor.size:
__SCREAMING_SNAKE_CASE = image_processor.size['''shortest_edge''']
else:
__SCREAMING_SNAKE_CASE = (image_processor.size['''height'''], image_processor.size['''width'''])
__SCREAMING_SNAKE_CASE = Compose(
[
Lambda(lambda UpperCAmelCase__ : img.convert('''RGB''' ) if img.mode != "RGB" else img ),
RandomResizedCrop(UpperCAmelCase__ , scale=(0.2, 1.0) , interpolation=InterpolationMode.BICUBIC ),
RandomHorizontalFlip(),
ToTensor(),
Normalize(mean=image_processor.image_mean , std=image_processor.image_std ),
] )
def preprocess_images(UpperCAmelCase__ ):
__SCREAMING_SNAKE_CASE = [transforms(UpperCAmelCase__ ) for image in examples[image_column_name]]
return examples
if training_args.do_train:
if "train" not in ds:
raise ValueError('''--do_train requires a train dataset''' )
if data_args.max_train_samples is not None:
__SCREAMING_SNAKE_CASE = ds['''train'''].shuffle(seed=training_args.seed ).select(range(data_args.max_train_samples ) )
# Set the training transforms
ds["train"].set_transform(UpperCAmelCase__ )
if training_args.do_eval:
if "validation" not in ds:
raise ValueError('''--do_eval requires a validation dataset''' )
if data_args.max_eval_samples is not None:
__SCREAMING_SNAKE_CASE = (
ds['''validation'''].shuffle(seed=training_args.seed ).select(range(data_args.max_eval_samples ) )
)
# Set the validation transforms
ds["validation"].set_transform(UpperCAmelCase__ )
# Compute absolute learning rate
__SCREAMING_SNAKE_CASE = (
training_args.train_batch_size * training_args.gradient_accumulation_steps * training_args.world_size
)
if training_args.base_learning_rate is not None:
__SCREAMING_SNAKE_CASE = training_args.base_learning_rate * total_train_batch_size / 2_56
# Initialize our trainer
__SCREAMING_SNAKE_CASE = Trainer(
model=UpperCAmelCase__ , args=UpperCAmelCase__ , train_dataset=ds['''train'''] if training_args.do_train else None , eval_dataset=ds['''validation'''] if training_args.do_eval else None , tokenizer=UpperCAmelCase__ , data_collator=UpperCAmelCase__ , )
# Training
if training_args.do_train:
__SCREAMING_SNAKE_CASE = None
if training_args.resume_from_checkpoint is not None:
__SCREAMING_SNAKE_CASE = training_args.resume_from_checkpoint
elif last_checkpoint is not None:
__SCREAMING_SNAKE_CASE = last_checkpoint
__SCREAMING_SNAKE_CASE = trainer.train(resume_from_checkpoint=UpperCAmelCase__ )
trainer.save_model()
trainer.log_metrics('''train''' , train_result.metrics )
trainer.save_metrics('''train''' , train_result.metrics )
trainer.save_state()
# Evaluation
if training_args.do_eval:
__SCREAMING_SNAKE_CASE = trainer.evaluate()
trainer.log_metrics('''eval''' , UpperCAmelCase__ )
trainer.save_metrics('''eval''' , UpperCAmelCase__ )
# Write model card and (optionally) push to hub
__SCREAMING_SNAKE_CASE = {
'''tasks''': '''masked-auto-encoding''',
'''dataset''': data_args.dataset_name,
'''tags''': ['''masked-auto-encoding'''],
}
if training_args.push_to_hub:
trainer.push_to_hub(**UpperCAmelCase__ )
else:
trainer.create_model_card(**UpperCAmelCase__ )
def _a ( UpperCAmelCase__ ) -> str:
# For xla_spawn (TPUs)
main()
if __name__ == "__main__":
main()
| 482 | 0 |
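The fragment above relies on `datasets.Dataset.set_transform` to run the torchvision pipeline lazily at access time. Below is a minimal self-contained sketch of that pattern; the dataset name and crop size are assumptions for illustration, not taken from the script.

from datasets import load_dataset
from torchvision.transforms import Compose, Normalize, RandomResizedCrop, ToTensor

transforms = Compose(
    [RandomResizedCrop(224, scale=(0.2, 1.0)), ToTensor(), Normalize(mean=[0.5] * 3, std=[0.5] * 3)]
)

def preprocess(examples):
    # raw PIL images are converted to normalized tensors only when a batch is accessed
    examples["pixel_values"] = [transforms(img.convert("RGB")) for img in examples["image"]]
    return examples

ds = load_dataset("beans", split="train")  # hypothetical small image dataset
ds.set_transform(preprocess)
print(ds[0]["pixel_values"].shape)  # torch.Size([3, 224, 224])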
"""simple docstring"""
import argparse
from transformers import T5Config, T5ForConditionalGeneration, load_tf_weights_in_t5
from transformers.utils import logging
logging.set_verbosity_info()
def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path, config_file, pytorch_dump_path):
    """Convert a TensorFlow T5 checkpoint into a PyTorch model and save it."""
    # Initialise PyTorch model
    config = T5Config.from_json_file(config_file)
    print(f"Building PyTorch model from configuration: {config}")
    model = T5ForConditionalGeneration(config)

    # Load weights from tf checkpoint
    load_tf_weights_in_t5(model, config, tf_checkpoint_path)

    # Save pytorch-model
    print(f"Save PyTorch model to {pytorch_dump_path}")
    model.save_pretrained(pytorch_dump_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--tf_checkpoint_path''', default=None, type=str, required=True, help='''Path to the TensorFlow checkpoint path.'''
)
parser.add_argument(
'''--config_file''',
default=None,
type=str,
required=True,
help=(
'''The config json file corresponding to the pre-trained T5 model. \nThis specifies the model architecture.'''
),
)
parser.add_argument(
'''--pytorch_dump_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
    args = parser.parse_args()
convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.config_file, args.pytorch_dump_path)
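A hedged usage sketch of the script's entry point; the paths below are placeholders, and reloading the dump directory with `from_pretrained` is just one way to sanity-check the conversion.

from transformers import T5ForConditionalGeneration

convert_tf_checkpoint_to_pytorch(
    tf_checkpoint_path="/path/to/model.ckpt",   # placeholder TF checkpoint prefix
    config_file="/path/to/config.json",         # placeholder T5 config file
    pytorch_dump_path="/path/to/pytorch_dump",  # output directory
)
model = T5ForConditionalGeneration.from_pretrained("/path/to/pytorch_dump")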
| 705 | """simple docstring"""
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.bpe.model"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "camembert-base": "https://huggingface.co/camembert-base/resolve/main/sentencepiece.bpe.model",
    }
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "camembert-base": 512,
}

SPIECE_UNDERLINE = "▁"
class CamembertTokenizer(PreTrainedTokenizer):
    """Construct a CamemBERT tokenizer, backed by SentencePiece."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file,
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        additional_special_tokens=["<s>NOTUSED", "</s>NOTUSED"],
        sp_model_kwargs=None,
        **kwargs,
    ):
        # Mask token behaves like a normal word, i.e. includes the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs

        super().__init__(
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            cls_token=cls_token,
            pad_token=pad_token,
            mask_token=mask_token,
            additional_special_tokens=additional_special_tokens,
            sp_model_kwargs=self.sp_model_kwargs,
            **kwargs,
        )
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(str(vocab_file))
        self.vocab_file = vocab_file
        # HACK: These tokens were added by fairseq but don't seem to be actually used when duplicated in the actual
        # sentencepiece vocabulary (this is the case for <s> and </s>)
        self.fairseq_tokens_to_ids = {"<s>NOTUSED": 0, "<pad>": 1, "</s>NOTUSED": 2, "<unk>": 3}
        self.fairseq_offset = len(self.fairseq_tokens_to_ids)
        self.fairseq_tokens_to_ids["<mask>"] = len(self.sp_model) + len(self.fairseq_tokens_to_ids)
        self.fairseq_ids_to_tokens = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        """Build model inputs by adding <s>...</s> (and a double </s></s> between a pair)."""
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + sep + token_ids_1 + sep

    def get_special_tokens_mask(self, token_ids_0, token_ids_1=None, already_has_special_tokens=False):
        """Return a mask with 1 for special tokens and 0 for sequence tokens."""
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )
        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1)) + [1]

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        """CamemBERT, like RoBERTa, does not use token type ids: return a list of zeros."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    @property
    def vocab_size(self):
        return len(self.fairseq_tokens_to_ids) + len(self.sp_model)

    def get_vocab(self):
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def _tokenize(self, text):
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token):
        """Convert a token (str) to an id using the vocab."""
        if token in self.fairseq_tokens_to_ids:
            return self.fairseq_tokens_to_ids[token]
        elif self.sp_model.PieceToId(token) == 0:
            # Convert sentence piece unk token to fairseq unk token index
            return self.unk_token_id
        return self.fairseq_offset + self.sp_model.PieceToId(token)

    def _convert_id_to_token(self, index):
        """Convert an index (integer) to a token (str) using the vocab."""
        if index in self.fairseq_ids_to_tokens:
            return self.fairseq_ids_to_tokens[index]
        return self.sp_model.IdToPiece(index - self.fairseq_offset)

    def convert_tokens_to_string(self, tokens):
        """Convert a sequence of tokens back to a single string."""
        current_sub_tokens = []
        out_string = ""
        prev_is_special = False
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                if not prev_is_special:
                    out_string += " "
                out_string += self.sp_model.decode(current_sub_tokens) + token
                prev_is_special = True
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token)
                prev_is_special = False
        out_string += self.sp_model.decode(current_sub_tokens)
        return out_string.strip()

    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d):
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)

    def save_vocabulary(self, save_directory, filename_prefix=None):
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        return (out_vocab_file,)
| 237 | 0 |
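A small usage sketch (assuming the `camembert-base` checkpoint is available locally or on the Hub) showing the `<s> A </s></s> B </s>` pair layout that `build_inputs_with_special_tokens` produces.

from transformers import CamembertTokenizer

tok = CamembertTokenizer.from_pretrained("camembert-base")
ids_a = tok.convert_tokens_to_ids(tok.tokenize("Bonjour"))
ids_b = tok.convert_tokens_to_ids(tok.tokenize("le monde"))
pair = tok.build_inputs_with_special_tokens(ids_a, ids_b)
# first token is <s> (cls); the two sequences are separated by a double </s></s>
assert pair[0] == tok.cls_token_id and pair[-1] == tok.sep_token_id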
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
_import_structure = {
"configuration_xlm": ["XLM_PRETRAINED_CONFIG_ARCHIVE_MAP", "XLMConfig", "XLMOnnxConfig"],
"tokenization_xlm": ["XLMTokenizer"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_xlm"] = [
"XLM_PRETRAINED_MODEL_ARCHIVE_LIST",
"XLMForMultipleChoice",
"XLMForQuestionAnswering",
"XLMForQuestionAnsweringSimple",
"XLMForSequenceClassification",
"XLMForTokenClassification",
"XLMModel",
"XLMPreTrainedModel",
"XLMWithLMHeadModel",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_xlm"] = [
"TF_XLM_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFXLMForMultipleChoice",
"TFXLMForQuestionAnsweringSimple",
"TFXLMForSequenceClassification",
"TFXLMForTokenClassification",
"TFXLMMainLayer",
"TFXLMModel",
"TFXLMPreTrainedModel",
"TFXLMWithLMHeadModel",
]
if TYPE_CHECKING:
from .configuration_xlm import XLM_PRETRAINED_CONFIG_ARCHIVE_MAP, XLMConfig, XLMOnnxConfig
from .tokenization_xlm import XLMTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xlm import (
XLM_PRETRAINED_MODEL_ARCHIVE_LIST,
XLMForMultipleChoice,
XLMForQuestionAnswering,
XLMForQuestionAnsweringSimple,
XLMForSequenceClassification,
XLMForTokenClassification,
XLMModel,
XLMPreTrainedModel,
XLMWithLMHeadModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_xlm import (
TF_XLM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXLMForMultipleChoice,
TFXLMForQuestionAnsweringSimple,
TFXLMForSequenceClassification,
TFXLMForTokenClassification,
TFXLMMainLayer,
TFXLMModel,
TFXLMPreTrainedModel,
TFXLMWithLMHeadModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 42 |
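Both this init file and the one that follows use the same deferred-import trick. Below is a self-contained sketch of the idea with illustrative names, not the transformers implementation itself: in a package's __init__.py, PEP 562's module-level __getattr__ resolves exported names on first access, so heavy submodules are imported only when actually used.

import importlib

# submodule -> names it exports; stdlib `json` is used here so the sketch stays runnable
_import_structure = {"json": ["dumps", "loads"]}

def __getattr__(name):
    for module_name, exported_names in _import_structure.items():
        if name in exported_names:
            module = importlib.import_module(module_name)
            return getattr(module, name)
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")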
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
_import_structure = {
    "configuration_vision_encoder_decoder": ["VisionEncoderDecoderConfig", "VisionEncoderDecoderOnnxConfig"]
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCAmelCase = ["""VisionEncoderDecoderModel"""]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCAmelCase = ["""TFVisionEncoderDecoderModel"""]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCAmelCase = ["""FlaxVisionEncoderDecoderModel"""]
if TYPE_CHECKING:
from .configuration_vision_encoder_decoder import VisionEncoderDecoderConfig, VisionEncoderDecoderOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vision_encoder_decoder import VisionEncoderDecoderModel
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_vision_encoder_decoder import TFVisionEncoderDecoderModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_vision_encoder_decoder import FlaxVisionEncoderDecoderModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 147 | 0 |
"""simple docstring"""
import inspect
import unittest
from transformers import SegformerConfig, is_torch_available, is_vision_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_MAPPING,
SegformerForImageClassification,
SegformerForSemanticSegmentation,
SegformerModel,
)
from transformers.models.segformer.modeling_segformer import SEGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import SegformerImageProcessor
class SegformerConfigTester(ConfigTester):
    def create_and_test_config_common_properties(self):
        config = self.config_class(**self.inputs_dict)
        self.parent.assertTrue(hasattr(config, "hidden_sizes"))
        self.parent.assertTrue(hasattr(config, "num_attention_heads"))
        self.parent.assertTrue(hasattr(config, "num_encoder_blocks"))
class SegformerModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=64,
        num_channels=3,
        num_encoder_blocks=4,
        depths=[2, 2, 2, 2],
        sr_ratios=[8, 4, 2, 1],
        hidden_sizes=[16, 32, 64, 128],
        downsampling_rates=[1, 4, 8, 16],
        num_attention_heads=[1, 2, 4, 8],
        is_training=True,
        use_labels=True,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        initializer_range=0.02,
        num_labels=3,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.num_encoder_blocks = num_encoder_blocks
        self.sr_ratios = sr_ratios
        self.depths = depths
        self.hidden_sizes = hidden_sizes
        self.downsampling_rates = downsampling_rates
        self.num_attention_heads = num_attention_heads
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.scope = scope
    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size, self.image_size, self.image_size], self.num_labels)

        config = self.get_config()
        return config, pixel_values, labels

    def get_config(self):
return SegformerConfig(
image_size=self.image_size , num_channels=self.num_channels , num_encoder_blocks=self.num_encoder_blocks , depths=self.depths , hidden_sizes=self.hidden_sizes , num_attention_heads=self.num_attention_heads , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , initializer_range=self.initializer_range , )
    def create_and_check_model(self, config, pixel_values, labels):
        model = SegformerModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        expected_height = expected_width = self.image_size // (self.downsampling_rates[-1] * 2)
        self.parent.assertEqual(
            result.last_hidden_state.shape, (self.batch_size, self.hidden_sizes[-1], expected_height, expected_width)
        )
    def create_and_check_for_image_segmentation(self, config, pixel_values, labels):
        config.num_labels = self.num_labels
        model = SegformerForSemanticSegmentation(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.logits.shape, (self.batch_size, self.num_labels, self.image_size // 4, self.image_size // 4)
        )
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(
            result.logits.shape, (self.batch_size, self.num_labels, self.image_size // 4, self.image_size // 4)
        )
        self.parent.assertGreater(result.loss, 0.0)
    def create_and_check_for_binary_image_segmentation(self, config, pixel_values, labels):
        config.num_labels = 1
        model = SegformerForSemanticSegmentation(config=config)
        model.to(torch_device)
        model.eval()
        labels = torch.randint(0, 1, (self.batch_size, self.image_size, self.image_size)).to(torch_device)
        result = model(pixel_values, labels=labels)
        self.parent.assertGreater(result.loss, 0.0)
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class SegformerModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            SegformerModel,
            SegformerForSemanticSegmentation,
            SegformerForImageClassification,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": SegformerModel,
            "image-classification": SegformerForImageClassification,
            "image-segmentation": SegformerForSemanticSegmentation,
        }
        if is_torch_available()
        else {}
    )

    fx_compatible = True
    test_head_masking = False
    test_pruning = False
    test_resize_embeddings = False

    def setUp(self):
        self.model_tester = SegformerModelTester(self)
        self.config_tester = SegformerConfigTester(self, config_class=SegformerConfig)

    def test_config(self):
        self.config_tester.run_common_tests()
    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_binary_image_segmentation(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_binary_image_segmentation(*config_and_inputs)

    def test_segmentation(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_segmentation(*config_and_inputs)
    @unittest.skip("SegFormer does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip("SegFormer does not have get_input_embeddings method and get_output_embeddings methods")
    def test_model_common_attributes(self):
        pass
    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)
    def test_attention_outputs(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.return_dict = True

        for model_class in self.all_model_classes:
            inputs_dict["output_attentions"] = True
            inputs_dict["output_hidden_states"] = False
            config.return_dict = True
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            attentions = outputs.attentions

            expected_num_attentions = sum(self.model_tester.depths)
            self.assertEqual(len(attentions), expected_num_attentions)

            # check that output_attentions also work using config
            del inputs_dict["output_attentions"]
            config.output_attentions = True
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            attentions = outputs.attentions
            self.assertEqual(len(attentions), expected_num_attentions)

            # verify the first attentions (first block, first layer)
            expected_seq_len = (self.model_tester.image_size // 4) ** 2
            expected_reduced_seq_len = (self.model_tester.image_size // (4 * self.model_tester.sr_ratios[0])) ** 2
            self.assertListEqual(
                list(attentions[0].shape[-3:]),
                [self.model_tester.num_attention_heads[0], expected_seq_len, expected_reduced_seq_len],
            )

            # verify the last attentions (last block, last layer)
            expected_seq_len = (self.model_tester.image_size // 32) ** 2
            expected_reduced_seq_len = (self.model_tester.image_size // (32 * self.model_tester.sr_ratios[-1])) ** 2
            self.assertListEqual(
                list(attentions[-1].shape[-3:]),
                [self.model_tester.num_attention_heads[-1], expected_seq_len, expected_reduced_seq_len],
            )

            out_len = len(outputs)

            # Check attention is always last and order is fine
            inputs_dict["output_attentions"] = True
            inputs_dict["output_hidden_states"] = True
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            self.assertEqual(out_len + 1, len(outputs))

            self_attentions = outputs.attentions
            self.assertEqual(len(self_attentions), expected_num_attentions)

            # verify the first attentions (first block, first layer)
            expected_seq_len = (self.model_tester.image_size // 4) ** 2
            expected_reduced_seq_len = (self.model_tester.image_size // (4 * self.model_tester.sr_ratios[0])) ** 2
            self.assertListEqual(
                list(self_attentions[0].shape[-3:]),
                [self.model_tester.num_attention_heads[0], expected_seq_len, expected_reduced_seq_len],
            )
    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()

            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            hidden_states = outputs.hidden_states

            expected_num_layers = self.model_tester.num_encoder_blocks
            self.assertEqual(len(hidden_states), expected_num_layers)

            # verify the first hidden states (first block)
            self.assertListEqual(
                list(hidden_states[0].shape[-3:]),
                [
                    self.model_tester.hidden_sizes[0],
                    self.model_tester.image_size // 4,
                    self.model_tester.image_size // 4,
                ],
            )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            check_hidden_states_output(inputs_dict, config, model_class)
    def test_training(self):
        if not self.model_tester.is_training:
            return

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.return_dict = True

        for model_class in self.all_model_classes:
            if model_class in get_values(MODEL_MAPPING):
                continue

            model = model_class(config)
            model.to(torch_device)
            model.train()
            inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            loss = model(**inputs).loss
            loss.backward()
    @unittest.skip("Will be fixed soon by reducing the size of the model used for common tests.")
    def test_model_is_small(self):
        pass
    @slow
    def test_model_from_pretrained(self):
        for model_name in SEGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = SegformerModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_torch
class SegformerModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_image_segmentation_ade(self):
        # only resize + normalize
        image_processor = SegformerImageProcessor(
            image_scale=(512, 512), keep_ratio=False, align=False, do_random_crop=False
        )
        model = SegformerForSemanticSegmentation.from_pretrained("nvidia/segformer-b0-finetuned-ade-512-512").to(
            torch_device
        )

        image = prepare_img()
        encoded_inputs = image_processor(images=image, return_tensors="pt")
        pixel_values = encoded_inputs.pixel_values.to(torch_device)

        with torch.no_grad():
            outputs = model(pixel_values)

        expected_shape = torch.Size((1, model.config.num_labels, 128, 128))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor(
            [
                [[-4.6310, -5.5232, -6.2356], [-5.1921, -6.1444, -6.5996], [-5.4424, -6.2790, -6.7574]],
                [[-12.1391, -13.3122, -13.9554], [-12.8732, -13.9352, -14.3563], [-12.9438, -13.8226, -14.2513]],
                [[-12.5134, -13.4686, -14.4915], [-12.8669, -14.4343, -14.7758], [-13.2523, -14.5819, -15.0694]],
            ]
        ).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3, :3, :3], expected_slice, atol=1e-4))
    @slow
    def test_inference_image_segmentation_city(self):
        # only resize + normalize
        image_processor = SegformerImageProcessor(
            image_scale=(512, 512), keep_ratio=False, align=False, do_random_crop=False
        )
        model = SegformerForSemanticSegmentation.from_pretrained(
            "nvidia/segformer-b1-finetuned-cityscapes-1024-1024"
        ).to(torch_device)

        image = prepare_img()
        encoded_inputs = image_processor(images=image, return_tensors="pt")
        pixel_values = encoded_inputs.pixel_values.to(torch_device)

        with torch.no_grad():
            outputs = model(pixel_values)

        expected_shape = torch.Size((1, model.config.num_labels, 128, 128))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor(
            [
                [[-13.5748, -13.9111, -12.6500], [-14.3500, -15.3683, -14.2328], [-14.7532, -16.0424, -15.6087]],
                [[-17.1651, -15.8725, -12.9653], [-17.2580, -17.3718, -14.8223], [-16.6058, -16.8783, -16.7452]],
                [[-3.6456, -3.0209, -1.4203], [-3.0797, -3.1959, -2.0000], [-1.8757, -1.9217, -1.6997]],
            ]
        ).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3, :3, :3], expected_slice, atol=1e-1))
    @slow
    def test_post_processing_semantic_segmentation(self):
        # only resize + normalize
        image_processor = SegformerImageProcessor(
            image_scale=(512, 512), keep_ratio=False, align=False, do_random_crop=False
        )
        model = SegformerForSemanticSegmentation.from_pretrained("nvidia/segformer-b0-finetuned-ade-512-512").to(
            torch_device
        )

        image = prepare_img()
        encoded_inputs = image_processor(images=image, return_tensors="pt")
        pixel_values = encoded_inputs.pixel_values.to(torch_device)

        with torch.no_grad():
            outputs = model(pixel_values)

        outputs.logits = outputs.logits.detach().cpu()

        segmentation = image_processor.post_process_semantic_segmentation(outputs=outputs, target_sizes=[(500, 300)])
        expected_shape = torch.Size((500, 300))
        self.assertEqual(segmentation[0].shape, expected_shape)

        segmentation = image_processor.post_process_semantic_segmentation(outputs=outputs)
        expected_shape = torch.Size((128, 128))
        self.assertEqual(segmentation[0].shape, expected_shape)
| 717 |
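A hedged sketch of what `post_process_semantic_segmentation` does with `target_sizes`: logits come out at reduced resolution, are bilinearly upsampled to the requested size, and an argmax over the class dimension yields the label map. Shapes here are illustrative.

import torch
import torch.nn.functional as F

logits = torch.randn(1, 150, 128, 128)     # (batch, num_labels, h/4, w/4)
upsampled = F.interpolate(logits, size=(500, 300), mode="bilinear", align_corners=False)
segmentation = upsampled.argmax(dim=1)[0]  # (500, 300) label map
print(segmentation.shape)  # torch.Size([500, 300])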
"""simple docstring"""
import json
import os
from pathlib import Path
import pytest
from datasets.download.download_config import DownloadConfig
from datasets.download.download_manager import DownloadManager
from datasets.utils.file_utils import hash_url_to_filename
URL = "http://www.mocksite.com/file1.txt"
CONTENT = '"text": ["foo", "foo"]'
HASH = "6d8ce9aa78a471c7477201efbeabd3bb01ac2e7d100a6dc024ba1608361f90a8"
class MockResponse:
    status_code = 200
    headers = {"Content-Length": "100"}
    cookies = {}

    def iter_content(self, **kwargs):
        return [bytes(CONTENT, "utf-8")]


def mock_request(*args, **kwargs):
    return MockResponse()
@pytest.mark.parametrize("urls_type", [str, list, dict])
def test_download_manager_download(urls_type, tmp_path, monkeypatch):
    import requests

    monkeypatch.setattr(requests, "request", mock_request)
    url = URL
    if issubclass(urls_type, str):
        urls = url
    elif issubclass(urls_type, list):
        urls = [url]
    elif issubclass(urls_type, dict):
        urls = {"train": url}
    dataset_name = "dummy"
    cache_subdir = "downloads"
    cache_dir_root = tmp_path
    download_config = DownloadConfig(
        cache_dir=os.path.join(cache_dir_root, cache_subdir),
        use_etag=False,
    )
    dl_manager = DownloadManager(dataset_name=dataset_name, download_config=download_config)
    downloaded_paths = dl_manager.download(urls)
    input_urls = urls
    for downloaded_paths in [downloaded_paths]:
        if isinstance(urls, str):
            downloaded_paths = [downloaded_paths]
            input_urls = [urls]
        elif isinstance(urls, dict):
            assert "train" in downloaded_paths.keys()
            downloaded_paths = downloaded_paths.values()
            input_urls = urls.values()
        assert downloaded_paths
        for downloaded_path, input_url in zip(downloaded_paths, input_urls):
            assert downloaded_path == dl_manager.downloaded_paths[input_url]
            downloaded_path = Path(downloaded_path)
            parts = downloaded_path.parts
            assert parts[-1] == HASH
            assert parts[-2] == cache_subdir
            assert downloaded_path.exists()
            content = downloaded_path.read_text()
            assert content == CONTENT
            metadata_downloaded_path = downloaded_path.with_suffix(".json")
            assert metadata_downloaded_path.exists()
            metadata_content = json.loads(metadata_downloaded_path.read_text())
            assert metadata_content == {"url": URL, "etag": None}
@pytest.mark.parametrize("paths_type", [str, list, dict])
def test_download_manager_extract(paths_type, xz_file, text_file):
    filename = str(xz_file)
    if issubclass(paths_type, str):
        paths = filename
    elif issubclass(paths_type, list):
        paths = [filename]
    elif issubclass(paths_type, dict):
        paths = {"train": filename}
    dataset_name = "dummy"
    cache_dir_root = xz_file.parent
    extracted_subdir = "extracted"
    download_config = DownloadConfig(
        cache_dir=cache_dir_root,
        use_etag=False,
    )
    dl_manager = DownloadManager(dataset_name=dataset_name, download_config=download_config)
    extracted_paths = dl_manager.extract(paths)
    input_paths = paths
    for extracted_paths in [extracted_paths]:
        if isinstance(paths, str):
            extracted_paths = [extracted_paths]
            input_paths = [paths]
        elif isinstance(paths, dict):
            assert "train" in extracted_paths.keys()
            extracted_paths = extracted_paths.values()
            input_paths = paths.values()
        assert extracted_paths
        for extracted_path, input_path in zip(extracted_paths, input_paths):
            assert extracted_path == dl_manager.extracted_paths[input_path]
            extracted_path = Path(extracted_path)
            parts = extracted_path.parts
            assert parts[-1] == hash_url_to_filename(input_path, etag=None)
            assert parts[-2] == extracted_subdir
            assert extracted_path.exists()
            extracted_file_content = extracted_path.read_text()
            expected_file_content = text_file.read_text()
            assert extracted_file_content == expected_file_content
def _test_jsonl(path, file):
    assert path.endswith(".jsonl")
    for num_items, line in enumerate(file, start=1):
        item = json.loads(line.decode("utf-8"))
        assert item.keys() == {"col_1", "col_2", "col_3"}
    assert num_items == 4


@pytest.mark.parametrize("archive_jsonl", ["tar_jsonl_path", "zip_jsonl_path"])
def test_iter_archive_path(archive_jsonl, request):
    archive_jsonl_path = request.getfixturevalue(archive_jsonl)
    dl_manager = DownloadManager()
    for num_jsonl, (path, file) in enumerate(dl_manager.iter_archive(archive_jsonl_path), start=1):
        _test_jsonl(path, file)
    assert num_jsonl == 2


@pytest.mark.parametrize("archive_nested_jsonl", ["tar_nested_jsonl_path", "zip_nested_jsonl_path"])
def test_iter_archive_file(archive_nested_jsonl, request):
    archive_nested_jsonl_path = request.getfixturevalue(archive_nested_jsonl)
    dl_manager = DownloadManager()
    for num_tar, (path, file) in enumerate(dl_manager.iter_archive(archive_nested_jsonl_path), start=1):
        for num_jsonl, (subpath, subfile) in enumerate(dl_manager.iter_archive(file), start=1):
            _test_jsonl(subpath, subfile)
    assert num_tar == 1
    assert num_jsonl == 2


def test_iter_files(data_dir_with_hidden_files):
    dl_manager = DownloadManager()
    for num_file, file in enumerate(dl_manager.iter_files(data_dir_with_hidden_files), start=1):
        assert os.path.basename(file) == ("test.txt" if num_file == 1 else "train.txt")
    assert num_file == 2
| 396 | 0 |
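The cache layout the test above asserts comes from naming cached files by a hash of the URL (plus optional ETag), which is what makes `parts[-1] == HASH` checkable. A minimal sketch of that idea; it mirrors what `hash_url_to_filename` does conceptually, not its exact code.

from hashlib import sha256
from typing import Optional

def url_to_cache_name(url: str, etag: Optional[str] = None) -> str:
    # deterministic filename from the URL, with the ETag hash appended when present
    filename = sha256(url.encode("utf-8")).hexdigest()
    if etag:
        filename += "." + sha256(etag.encode("utf-8")).hexdigest()
    return filename

print(url_to_cache_name("http://www.mocksite.com/file1.txt"))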
"""simple docstring"""
def gray_code(bit_count: int) -> list:
    """Return the Gray code sequence (as integers) for the given number of bits."""
    # bit count represents no. of bits in the gray code
    if bit_count < 0:
        raise ValueError("The given input must be positive")

    # get the generated string sequence
    sequence = gray_code_sequence_string(bit_count)

    # convert them to integers
    for i in range(len(sequence)):
        sequence[i] = int(sequence[i], 2)

    return sequence


def gray_code_sequence_string(bit_count: int) -> list:
    """Generate the Gray code sequence as binary strings, recursively."""
    # The approach is a recursive one
    # Base case achieved when either n = 0 or n = 1
    if bit_count == 0:
        return ["0"]

    if bit_count == 1:
        return ["0", "1"]

    seq_len = 1 << bit_count  # defines the length of the sequence
    # 1 << n is equivalent to 2^n

    # recursive answer will generate answer for n-1 bits
    smaller_sequence = gray_code_sequence_string(bit_count - 1)

    sequence = []

    # append 0 to first half of the smaller sequence generated
    for i in range(seq_len // 2):
        sequence.append("0" + smaller_sequence[i])

    # append 1 to second half ... start from the end of the list
    for i in reversed(range(seq_len // 2)):
        sequence.append("1" + smaller_sequence[i])

    return sequence


if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 564 |
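An equivalent closed form worth knowing: the i-th reflected Gray code is i ^ (i >> 1). A quick cross-check against the recursive construction above (a sketch added here, not part of the original file):

def gray_code_bitwise(bit_count: int) -> list:
    # i ^ (i >> 1) flips exactly one bit between consecutive values
    return [i ^ (i >> 1) for i in range(1 << bit_count)]

assert gray_code_bitwise(3) == [0, 1, 3, 2, 6, 7, 5, 4]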
import json
import sys
import tempfile
import unittest
from pathlib import Path
import transformers
from transformers import (
CONFIG_MAPPING,
IMAGE_PROCESSOR_MAPPING,
AutoConfig,
AutoImageProcessor,
CLIPConfig,
CLIPImageProcessor,
)
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER
sys.path.append(str(Path(__file__).parent.parent.parent.parent / "utils"))
from test_module.custom_configuration import CustomConfig # noqa E402
from test_module.custom_image_processing import CustomImageProcessor # noqa E402
class AutoImageProcessorTest(unittest.TestCase):
    def setUp(self):
        transformers.dynamic_module_utils.TIME_OUT_REMOTE_CODE = 0

    def test_image_processor_from_model_shortcut(self):
        config = AutoImageProcessor.from_pretrained("openai/clip-vit-base-patch32")
        self.assertIsInstance(config, CLIPImageProcessor)
    def test_image_processor_from_local_directory_from_key(self):
        with tempfile.TemporaryDirectory() as tmpdirname:
            processor_tmpfile = Path(tmpdirname) / "preprocessor_config.json"
            config_tmpfile = Path(tmpdirname) / "config.json"
            json.dump(
                {"image_processor_type": "CLIPImageProcessor", "processor_class": "CLIPProcessor"},
                open(processor_tmpfile, "w"),
            )
            json.dump({"model_type": "clip"}, open(config_tmpfile, "w"))

            config = AutoImageProcessor.from_pretrained(tmpdirname)
            self.assertIsInstance(config, CLIPImageProcessor)
    def test_image_processor_from_local_directory_from_feature_extractor_key(self):
        # Ensure we can load the image processor from the feature extractor config
        with tempfile.TemporaryDirectory() as tmpdirname:
            processor_tmpfile = Path(tmpdirname) / "preprocessor_config.json"
            config_tmpfile = Path(tmpdirname) / "config.json"
            json.dump(
                {"feature_extractor_type": "CLIPFeatureExtractor", "processor_class": "CLIPProcessor"},
                open(processor_tmpfile, "w"),
            )
            json.dump({"model_type": "clip"}, open(config_tmpfile, "w"))

            config = AutoImageProcessor.from_pretrained(tmpdirname)
            self.assertIsInstance(config, CLIPImageProcessor)
    def test_image_processor_from_local_directory_from_config(self):
        with tempfile.TemporaryDirectory() as tmpdirname:
            model_config = CLIPConfig()

            # Create a dummy config file with image_proceesor_type
            processor_tmpfile = Path(tmpdirname) / "preprocessor_config.json"
            config_tmpfile = Path(tmpdirname) / "config.json"
            json.dump(
                {"image_processor_type": "CLIPImageProcessor", "processor_class": "CLIPProcessor"},
                open(processor_tmpfile, "w"),
            )
            json.dump({"model_type": "clip"}, open(config_tmpfile, "w"))

            # remove image_processor_type to make sure config.json alone is enough to load image processor locally
            config_dict = AutoImageProcessor.from_pretrained(tmpdirname).to_dict()
            config_dict.pop("image_processor_type")
            config = CLIPImageProcessor(**config_dict)

            # save in new folder
            model_config.save_pretrained(tmpdirname)
            config.save_pretrained(tmpdirname)

            config = AutoImageProcessor.from_pretrained(tmpdirname)

            # make sure private variable is not incorrectly saved
            dict_as_saved = json.loads(config.to_json_string())
            self.assertTrue("_processor_class" not in dict_as_saved)

            self.assertIsInstance(config, CLIPImageProcessor)
    def test_image_processor_from_local_file(self):
        with tempfile.TemporaryDirectory() as tmpdirname:
            processor_tmpfile = Path(tmpdirname) / "preprocessor_config.json"
            json.dump(
                {"image_processor_type": "CLIPImageProcessor", "processor_class": "CLIPProcessor"},
                open(processor_tmpfile, "w"),
            )

            config = AutoImageProcessor.from_pretrained(processor_tmpfile)
            self.assertIsInstance(config, CLIPImageProcessor)
    def test_repo_not_found(self):
        with self.assertRaisesRegex(
            EnvironmentError, "clip-base is not a local folder and is not a valid model identifier"
        ):
            _ = AutoImageProcessor.from_pretrained("clip-base")

    def test_revision_not_found(self):
        with self.assertRaisesRegex(
            EnvironmentError, r"aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)"
        ):
            _ = AutoImageProcessor.from_pretrained(DUMMY_UNKNOWN_IDENTIFIER, revision="aaaaaa")

    def test_image_processor_not_found(self):
        with self.assertRaisesRegex(
            EnvironmentError,
            "hf-internal-testing/config-no-model does not appear to have a file named preprocessor_config.json.",
        ):
            _ = AutoImageProcessor.from_pretrained("hf-internal-testing/config-no-model")
    def test_from_pretrained_dynamic_image_processor(self):
        # If remote code is not set, we will time out when asking whether to load the model.
        with self.assertRaises(ValueError):
            image_processor = AutoImageProcessor.from_pretrained("hf-internal-testing/test_dynamic_image_processor")
        # If remote code is disabled, we can't load this config.
        with self.assertRaises(ValueError):
            image_processor = AutoImageProcessor.from_pretrained(
                "hf-internal-testing/test_dynamic_image_processor", trust_remote_code=False
            )

        image_processor = AutoImageProcessor.from_pretrained(
            "hf-internal-testing/test_dynamic_image_processor", trust_remote_code=True
        )
        self.assertEqual(image_processor.__class__.__name__, "NewImageProcessor")

        # Test image processor can be reloaded.
        with tempfile.TemporaryDirectory() as tmp_dir:
            image_processor.save_pretrained(tmp_dir)
            reloaded_image_processor = AutoImageProcessor.from_pretrained(tmp_dir, trust_remote_code=True)
        self.assertEqual(reloaded_image_processor.__class__.__name__, "NewImageProcessor")
    def test_new_image_processor_registration(self):
        try:
            AutoConfig.register("custom", CustomConfig)
            AutoImageProcessor.register(CustomConfig, CustomImageProcessor)
            # Trying to register something existing in the Transformers library will raise an error
            with self.assertRaises(ValueError):
                AutoImageProcessor.register(CLIPConfig, CLIPImageProcessor)

            with tempfile.TemporaryDirectory() as tmpdirname:
                processor_tmpfile = Path(tmpdirname) / "preprocessor_config.json"
                config_tmpfile = Path(tmpdirname) / "config.json"
                json.dump(
                    {"feature_extractor_type": "CLIPFeatureExtractor", "processor_class": "CLIPProcessor"},
                    open(processor_tmpfile, "w"),
                )
                json.dump({"model_type": "clip"}, open(config_tmpfile, "w"))

                image_processor = CustomImageProcessor.from_pretrained(tmpdirname)

                # Now that the config is registered, it can be used as any other config with the auto-API
                with tempfile.TemporaryDirectory() as tmp_dir:
                    image_processor.save_pretrained(tmp_dir)
                    new_image_processor = AutoImageProcessor.from_pretrained(tmp_dir)
                    self.assertIsInstance(new_image_processor, CustomImageProcessor)
        finally:
            if "custom" in CONFIG_MAPPING._extra_content:
                del CONFIG_MAPPING._extra_content["custom"]
            if CustomConfig in IMAGE_PROCESSOR_MAPPING._extra_content:
                del IMAGE_PROCESSOR_MAPPING._extra_content[CustomConfig]
    def test_from_pretrained_dynamic_image_processor_conflict(self):
        class NewImageProcessor(CLIPImageProcessor):
            is_local = True

        try:
            AutoConfig.register("custom", CustomConfig)
            AutoImageProcessor.register(CustomConfig, NewImageProcessor)
            # If remote code is not set, the default is to use local
            image_processor = AutoImageProcessor.from_pretrained("hf-internal-testing/test_dynamic_image_processor")
            self.assertEqual(image_processor.__class__.__name__, "NewImageProcessor")
            self.assertTrue(image_processor.is_local)

            # If remote code is disabled, we load the local one.
            image_processor = AutoImageProcessor.from_pretrained(
                "hf-internal-testing/test_dynamic_image_processor", trust_remote_code=False
            )
            self.assertEqual(image_processor.__class__.__name__, "NewImageProcessor")
            self.assertTrue(image_processor.is_local)

            # If remote is enabled, we load from the Hub
            image_processor = AutoImageProcessor.from_pretrained(
                "hf-internal-testing/test_dynamic_image_processor", trust_remote_code=True
            )
            self.assertEqual(image_processor.__class__.__name__, "NewImageProcessor")
            self.assertTrue(not hasattr(image_processor, "is_local"))
        finally:
            if "custom" in CONFIG_MAPPING._extra_content:
                del CONFIG_MAPPING._extra_content["custom"]
            if CustomConfig in IMAGE_PROCESSOR_MAPPING._extra_content:
                del IMAGE_PROCESSOR_MAPPING._extra_content[CustomConfig]
| 564 | 1 |
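The register/auto-lookup mechanism these tests exercise boils down to a mapping from config class to processor class, with a guard against overwriting built-in entries. A stripped-down sketch with illustrative names, not the transformers internals:

class BaseConfig: ...
class BaseProcessor: ...

_PROCESSOR_REGISTRY = {}

def register(config_cls, processor_cls):
    # refuse to silently replace an existing mapping, mirroring the ValueError above
    if config_cls in _PROCESSOR_REGISTRY:
        raise ValueError(f"{config_cls.__name__} is already registered")
    _PROCESSOR_REGISTRY[config_cls] = processor_cls

def auto_processor(config):
    # dispatch on the concrete config type, like AutoImageProcessor does
    return _PROCESSOR_REGISTRY[type(config)]()

register(BaseConfig, BaseProcessor)
assert isinstance(auto_processor(BaseConfig()), BaseProcessor)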
def climb_stairs(number_of_steps: int) -> int:
    """Distinct ways to climb a staircase taking 1 or 2 steps at a time (LeetCode 70)."""
    assert (
        isinstance(number_of_steps, int) and number_of_steps > 0
    ), f"number_of_steps needs to be positive integer, your input {number_of_steps}"
    if number_of_steps == 1:
        return 1
    previous, current = 1, 1
    for _ in range(number_of_steps - 1):
        current, previous = current + previous, current
    return current
if __name__ == "__main__":
import doctest
doctest.testmod() | 278 |
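Quick check of the recurrence above: since f(n) = f(n-1) + f(n-2), the counts are shifted Fibonacci numbers (1, 2, 3, 5, 8, ...).

for n, expected in [(1, 1), (2, 2), (3, 3), (4, 5), (5, 8)]:
    assert climb_stairs(n) == expected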
import warnings
from ...utils import logging
from .image_processing_videomae import VideoMAEImageProcessor
logger = logging.get_logger(__name__)


class VideoMAEFeatureExtractor(VideoMAEImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class VideoMAEFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
            " Please use VideoMAEImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
| 278 | 1 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

SPEECH_TO_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'facebook/s2t-small-librispeech-asr': (
'https://huggingface.co/facebook/s2t-small-librispeech-asr/resolve/main/config.json'
),
# See all Speech2Text models at https://huggingface.co/models?filter=speech_to_text
}
class Speech2TextConfig(PretrainedConfig):
    """Configuration class for the Speech2Text model."""

    model_type = "speech_to_text"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}

    def __init__(
        self,
        vocab_size=10000,
        encoder_layers=12,
        encoder_ffn_dim=2048,
        encoder_attention_heads=4,
        decoder_layers=6,
        decoder_ffn_dim=2048,
        decoder_attention_heads=4,
        encoder_layerdrop=0.0,
        decoder_layerdrop=0.0,
        use_cache=True,
        is_encoder_decoder=True,
        activation_function="relu",
        d_model=256,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        decoder_start_token_id=2,
        scale_embedding=True,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        max_source_positions=6000,
        max_target_positions=1024,
        num_conv_layers=2,
        conv_kernel_sizes=(5, 5),
        conv_channels=1024,
        input_feat_per_channel=80,
        input_channels=1,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.num_hidden_layers = encoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        self.max_source_positions = max_source_positions
        self.max_target_positions = max_target_positions
        self.num_conv_layers = num_conv_layers
        self.conv_kernel_sizes = list(conv_kernel_sizes)
        self.conv_channels = conv_channels
        self.input_feat_per_channel = input_feat_per_channel
        self.input_channels = input_channels

        if len(self.conv_kernel_sizes) != self.num_conv_layers:
            raise ValueError(
                "Configuration for convolutional module is incorrect. "
                "It is required that `len(config.conv_kernel_sizes)` == `config.num_conv_layers` "
                f"but is `len(config.conv_kernel_sizes) = {len(self.conv_kernel_sizes)}`, "
                f"`config.num_conv_layers = {self.num_conv_layers}`."
            )

        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            is_encoder_decoder=is_encoder_decoder,
            decoder_start_token_id=decoder_start_token_id,
            **kwargs,
        )
| 523 |
'''simple docstring'''
from typing import Callable, Optional
from .. import Features
from ..packaged_modules.generator.generator import Generator
from .abc import AbstractDatasetInputStream
class GeneratorDatasetInputStream(AbstractDatasetInputStream):
    def __init__(
        self,
        generator: Callable,
        features: Optional[Features] = None,
        cache_dir: str = None,
        keep_in_memory: bool = False,
        streaming: bool = False,
        gen_kwargs: Optional[dict] = None,
        num_proc: Optional[int] = None,
        **kwargs,
    ):
        super().__init__(
            features=features,
            cache_dir=cache_dir,
            keep_in_memory=keep_in_memory,
            streaming=streaming,
            num_proc=num_proc,
            **kwargs,
        )
        self.builder = Generator(
            cache_dir=cache_dir,
            features=features,
            generator=generator,
            gen_kwargs=gen_kwargs,
            **kwargs,
        )

    def read(self):
        # Build iterable dataset
        if self.streaming:
            dataset = self.builder.as_streaming_dataset(split="train")
        # Build regular (map-style) dataset
        else:
            download_config = None
            download_mode = None
            verification_mode = None
            base_path = None

            self.builder.download_and_prepare(
                download_config=download_config,
                download_mode=download_mode,
                verification_mode=verification_mode,
                base_path=base_path,
                num_proc=self.num_proc,
            )
            dataset = self.builder.as_dataset(
                split="train", verification_mode=verification_mode, in_memory=self.keep_in_memory
            )
        return dataset
| 523 | 1 |
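A small usage sketch (assuming the `datasets` library is installed) of the generator-backed reader above through its public entry point, `Dataset.from_generator`.

from datasets import Dataset

def gen():
    for i in range(3):
        yield {"id": i, "text": f"example {i}"}

ds = Dataset.from_generator(gen)
print(ds[0])  # {'id': 0, 'text': 'example 0'}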
deps = {
"Pillow": "Pillow",
"accelerate": "accelerate>=0.11.0",
"compel": "compel==0.1.8",
"black": "black~=23.1",
"datasets": "datasets",
"filelock": "filelock",
"flax": "flax>=0.4.1",
"hf-doc-builder": "hf-doc-builder>=0.3.0",
"huggingface-hub": "huggingface-hub>=0.13.2",
"requests-mock": "requests-mock==1.10.0",
"importlib_metadata": "importlib_metadata",
"invisible-watermark": "invisible-watermark",
"isort": "isort>=5.5.4",
"jax": "jax>=0.2.8,!=0.3.2",
"jaxlib": "jaxlib>=0.1.65",
"Jinja2": "Jinja2",
"k-diffusion": "k-diffusion>=0.0.12",
"torchsde": "torchsde",
"note_seq": "note_seq",
"librosa": "librosa",
"numpy": "numpy",
"omegaconf": "omegaconf",
"parameterized": "parameterized",
"protobuf": "protobuf>=3.20.3,<4",
"pytest": "pytest",
"pytest-timeout": "pytest-timeout",
"pytest-xdist": "pytest-xdist",
"ruff": "ruff>=0.0.241",
"safetensors": "safetensors",
"sentencepiece": "sentencepiece>=0.1.91,!=0.1.92",
"scipy": "scipy",
"onnx": "onnx",
"regex": "regex!=2019.12.17",
"requests": "requests",
"tensorboard": "tensorboard",
"torch": "torch>=1.4",
"torchvision": "torchvision",
"transformers": "transformers>=4.25.1",
"urllib3": "urllib3<=2.0.0",
}
| 703 |
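A hedged sketch of how such a pin table is typically consumed: each entry is split into (package, specifier) so that setup code can look required versions up by name. The regex here is illustrative, not the library's own parser.

import re

deps_sample = {"torch": "torch>=1.4", "numpy": "numpy", "jax": "jax>=0.2.8,!=0.3.2"}
parsed = {
    name: re.match(r"^([A-Za-z0-9_\-\.]+)(.*)$", pin).groups()
    for name, pin in deps_sample.items()
}
print(parsed["torch"])  # ('torch', '>=1.4')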
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

REALM_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"google/realm-cc-news-pretrained-embedder": (
"https://huggingface.co/google/realm-cc-news-pretrained-embedder/resolve/main/config.json"
),
"google/realm-cc-news-pretrained-encoder": (
"https://huggingface.co/google/realm-cc-news-pretrained-encoder/resolve/main/config.json"
),
"google/realm-cc-news-pretrained-scorer": (
"https://huggingface.co/google/realm-cc-news-pretrained-scorer/resolve/main/config.json"
),
"google/realm-cc-news-pretrained-openqa": (
"https://huggingface.co/google/realm-cc-news-pretrained-openqa/aresolve/main/config.json"
),
"google/realm-orqa-nq-openqa": "https://huggingface.co/google/realm-orqa-nq-openqa/resolve/main/config.json",
"google/realm-orqa-nq-reader": "https://huggingface.co/google/realm-orqa-nq-reader/resolve/main/config.json",
"google/realm-orqa-wq-openqa": "https://huggingface.co/google/realm-orqa-wq-openqa/resolve/main/config.json",
"google/realm-orqa-wq-reader": "https://huggingface.co/google/realm-orqa-wq-reader/resolve/main/config.json",
# See all REALM models at https://huggingface.co/models?filter=realm
}
class RealmConfig(PretrainedConfig):
    """Configuration class for REALM models."""

    model_type = "realm"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        retriever_proj_size=128,
        num_hidden_layers=12,
        num_attention_heads=12,
        num_candidates=8,
        intermediate_size=3072,
        hidden_act="gelu_new",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        span_hidden_size=256,
        max_span_width=10,
        reader_layer_norm_eps=1e-3,
        reader_beam_size=5,
        reader_seq_len=320,
        num_block_records=13353718,
        searcher_beam_size=5000,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        # Common config
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.retriever_proj_size = retriever_proj_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.num_candidates = num_candidates
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.type_vocab_size = type_vocab_size
        self.layer_norm_eps = layer_norm_eps

        # Reader config
        self.span_hidden_size = span_hidden_size
        self.max_span_width = max_span_width
        self.reader_layer_norm_eps = reader_layer_norm_eps
        self.reader_beam_size = reader_beam_size
        self.reader_seq_len = reader_seq_len

        # Retrieval config
        self.num_block_records = num_block_records
        self.searcher_beam_size = searcher_beam_size
| 462 | 0 |
'''simple docstring'''
import collections
import json
import math
import os
import re
import time
from fnmatch import fnmatch
from typing import Dict
import requests
from slack_sdk import WebClient
client = WebClient(token=os.environ["CI_SLACK_BOT_TOKEN"])
def handle_test_results(test_results):
    expressions = test_results.split(" ")

    failed = 0
    success = 0

    # When the output is short enough, the output is surrounded by = signs: "== OUTPUT =="
    # When it is too long, those signs are not present.
    time_spent = expressions[-2] if "=" in expressions[-1] else expressions[-1]

    for i, expression in enumerate(expressions):
        if "failed" in expression:
            failed += int(expressions[i - 1])
        if "passed" in expression:
            success += int(expressions[i - 1])

    return failed, success, time_spent
def extract_first_line_failure(failures_short_lines):
    failures = {}
    file = None
    in_error = False
    for line in failures_short_lines.split("\n"):
        if re.search(r"_ \[doctest\]", line):
            in_error = True
            file = line.split(" ")[2]
        elif in_error and not line.split(" ")[0].isdigit():
            failures[file] = line
            in_error = False

    return failures
class Message:
    def __init__(self, title: str, doc_test_results: Dict):
        self.title = title

        self._time_spent = doc_test_results["time_spent"].split(",")[0]
        self.n_success = doc_test_results["success"]
        self.n_failures = doc_test_results["failures"]
        self.n_tests = self.n_success + self.n_failures

        # Failures and success of the modeling tests
        self.doc_test_results = doc_test_results

    @property
    def time(self) -> str:
        time_spent = [self._time_spent]
        total_secs = 0

        for time in time_spent:
            time_parts = time.split(":")

            # Time can be formatted as xx:xx:xx, as .xx, or as x.xx if the time spent was less than a minute.
            if len(time_parts) == 1:
                time_parts = [0, 0, time_parts[0]]

            hours, minutes, seconds = int(time_parts[0]), int(time_parts[1]), float(time_parts[2])
            total_secs += hours * 3600 + minutes * 60 + seconds

        hours, minutes, seconds = total_secs // 3600, (total_secs % 3600) // 60, total_secs % 60
        return f"{int(hours)}h{int(minutes)}m{int(seconds)}s"
@property
def lowerCamelCase__ ( self ):
'''simple docstring'''
return {"type": "header", "text": {"type": "plain_text", "text": self.title}}
@property
def lowerCamelCase__ ( self ):
'''simple docstring'''
return {
"type": "section",
"text": {
"type": "plain_text",
"text": f'🌞 There were no failures: all {self.n_tests} tests passed. The suite ran in {self.time}.',
"emoji": True,
},
"accessory": {
"type": "button",
"text": {"type": "plain_text", "text": "Check Action results", "emoji": True},
"url": f'https://github.com/huggingface/transformers/actions/runs/{os.environ["GITHUB_RUN_ID"]}',
},
}
    @property
    def failures(self):
        return {
            "type": "section",
            "text": {
                "type": "plain_text",
                "text": (
                    f"There were {self.n_failures} failures, out of {self.n_tests} tests.\nThe suite ran in"
                    f" {self.time}."
                ),
                "emoji": True,
            },
            "accessory": {
                "type": "button",
                "text": {"type": "plain_text", "text": "Check Action results", "emoji": True},
                "url": f"https://github.com/huggingface/transformers/actions/runs/{os.environ['GITHUB_RUN_ID']}",
            },
        }
    @property
    def category_failures(self):
        line_length = 40
        # reads the module-level doc_test_results populated in the __main__ block below
        category_failures = {k: v["failed"] for k, v in doc_test_results.items() if isinstance(v, dict)}

        report = ""
        for category, failures in category_failures.items():
            if len(failures) == 0:
                continue

            if report != "":
                report += "\n\n"

            report += f"*{category} failures*:".ljust(line_length // 2).rjust(line_length // 2) + "\n"
            report += "`"
            report += "`\n`".join(failures)
            report += "`"

        return {
            "type": "section",
            "text": {
                "type": "mrkdwn",
                "text": f"The following examples had failures:\n\n\n{report}\n",
            },
        }
    @property
    def payload(self):
        blocks = [self.header]

        if self.n_failures > 0:
            blocks.append(self.failures)

        if self.n_failures > 0:
            blocks.extend([self.category_failures])

        if self.n_failures == 0:
            blocks.append(self.no_failures)

        return json.dumps(blocks)
    @staticmethod
    def error_out():
        payload = [
            {
                "type": "section",
                "text": {
                    "type": "plain_text",
                    "text": "There was an issue running the tests.",
                },
                "accessory": {
                    "type": "button",
                    "text": {"type": "plain_text", "text": "Check Action results", "emoji": True},
                    "url": f"https://github.com/huggingface/transformers/actions/runs/{os.environ['GITHUB_RUN_ID']}",
                },
            }
        ]

        print("Sending the following payload")
        print(json.dumps({"blocks": payload}))

        client.chat_postMessage(
            channel=os.environ["CI_SLACK_CHANNEL_ID_DAILY"],
            text="There was an issue running the tests.",
            blocks=payload,
        )
    def post(self):
        print("Sending the following payload")
        print(json.dumps({"blocks": json.loads(self.payload)}))

        text = f"{self.n_failures} failures out of {self.n_tests} tests," if self.n_failures else "All tests passed."

        self.thread_ts = client.chat_postMessage(
            channel=os.environ["CI_SLACK_CHANNEL_ID_DAILY"],
            blocks=self.payload,
            text=text,
        )
    def get_reply_blocks(self, job_name, job_link, failures, text):
        failures_text = ""
        for key, value in failures.items():
            value = value[:200] + " [Truncated]" if len(value) > 250 else value
            failures_text += f"*{key}*\n_{value}_\n\n"

        title = job_name
        content = {"type": "section", "text": {"type": "mrkdwn", "text": text}}

        if job_link is not None:
            content["accessory"] = {
                "type": "button",
                "text": {"type": "plain_text", "text": "GitHub Action job", "emoji": True},
                "url": job_link,
            }

        return [
            {"type": "header", "text": {"type": "plain_text", "text": title.upper(), "emoji": True}},
            content,
            {"type": "section", "text": {"type": "mrkdwn", "text": failures_text}},
        ]
    def post_reply(self):
        if self.thread_ts is None:
            raise ValueError("Can only post reply if a post has been made.")

        job_link = self.doc_test_results.pop("job_link")
        self.doc_test_results.pop("failures")
        self.doc_test_results.pop("success")
        self.doc_test_results.pop("time_spent")

        sorted_dict = sorted(self.doc_test_results.items(), key=lambda t: t[0])
        for job, job_result in sorted_dict:
            if len(job_result["failures"]):
                text = f"*Num failures* :{len(job_result['failed'])} \n"
                failures = job_result["failures"]
                blocks = self.get_reply_blocks(job, job_link, failures, text=text)

                print("Sending the following reply")
                print(json.dumps({"blocks": blocks}))

                client.chat_postMessage(
                    channel=os.environ["CI_SLACK_CHANNEL_ID_DAILY"],
                    text=f"Results for {job}",
                    blocks=blocks,
                    thread_ts=self.thread_ts["ts"],
                )

                time.sleep(1)
def get_job_links():
    run_id = os.environ["GITHUB_RUN_ID"]
    url = f"https://api.github.com/repos/huggingface/transformers/actions/runs/{run_id}/jobs?per_page=100"
    result = requests.get(url).json()
    jobs = {}

    try:
        jobs.update({job["name"]: job["html_url"] for job in result["jobs"]})
        pages_to_iterate_over = math.ceil((result["total_count"] - 100) / 100)

        for i in range(pages_to_iterate_over):
            result = requests.get(url + f"&page={i + 2}").json()
            jobs.update({job["name"]: job["html_url"] for job in result["jobs"]})

        return jobs
    except Exception as e:
        print("Unknown error, could not fetch links.", e)

    return {}
def retrieve_artifact(name: str):
    _artifact = {}

    if os.path.exists(name):
        files = os.listdir(name)
        for file in files:
            try:
                with open(os.path.join(name, file), encoding="utf-8") as f:
                    _artifact[file.split(".")[0]] = f.read()
            except UnicodeDecodeError as e:
                raise ValueError(f"Could not open {os.path.join(name, file)}.") from e

    return _artifact
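# Each file "<stem>.txt" in the artifact directory is exposed under the key "<stem>"
# (e.g. "stats", "failures_short", "summary_short"), matching the lookups in __main__ below.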
def retrieve_available_artifacts():
    class Artifact:
        def __init__(self, name):
            self.name = name
            self.paths = []

        def __str__(self):
            return self.name

        def add_path(self, path):
            self.paths.append({"name": self.name, "path": path})

    _available_artifacts = {}

    directories = filter(os.path.isdir, os.listdir())
    for directory in directories:
        artifact_name = directory
        if artifact_name not in _available_artifacts:
            _available_artifacts[artifact_name] = Artifact(artifact_name)

        _available_artifacts[artifact_name].add_path(directory)

    return _available_artifacts
if __name__ == "__main__":
_a : List[str] = get_job_links()
_a : str = retrieve_available_artifacts()
_a : Union[str, Any] = collections.OrderedDict(
[
("""*.py""", """API Examples"""),
("""*.md""", """MD Examples"""),
]
)
# This dict will contain all the information relative to each doc test category:
# - failed: list of failed tests
# - failures: dict in the format 'test': 'error_message'
_a : Dict = {
v: {
"""failed""": [],
"""failures""": {},
}
for v in docs.values()
}
# Link to the GitHub Action job
_a : Dict = github_actions_job_links.get("""run_doctests""")
_a : List[str] = available_artifacts["""doc_tests_gpu_test_reports"""].paths[0]
_a : Tuple = retrieve_artifact(artifact_path["""name"""])
if "stats" in artifact:
_a ,_a ,_a : Optional[Any] = handle_test_results(artifact["""stats"""])
_a : List[str] = failed
_a : List[str] = success
_a : Optional[Any] = time_spent[1:-1] + """, """
_a : List[Any] = extract_first_line_failure(artifact["""failures_short"""])
for line in artifact["summary_short"].split("""\n"""):
if re.search("""FAILED""", line):
_a : List[str] = line.replace("""FAILED """, """""")
_a : List[Any] = line.split()[0].replace("""\n""", """""")
if "::" in line:
_a ,_a : int = line.split("""::""")
else:
_a ,_a : int = line, line
for file_regex in docs.keys():
if fnmatch(file_path, file_regex):
_a : str = docs[file_regex]
doc_test_results[category]["failed"].append(test)
_a : Dict = all_failures[test] if test in all_failures else """N/A"""
_a : List[Any] = failure
break
_a : Optional[Any] = Message("""🤗 Results of the doc tests.""", doc_test_results)
message.post()
message.post_reply()
| 689 |
'''simple docstring'''
import argparse
import requests
import torch
from PIL import Image
from transformers import CLIPProcessor, GroupViTConfig, GroupViTModel
def rename_key(name):
    # vision encoder
    if "img_encoder.pos_embed" in name:
        name = name.replace("img_encoder.pos_embed", "vision_model.embeddings.position_embeddings")
    if "img_encoder.patch_embed.proj" in name:
        name = name.replace("img_encoder.patch_embed.proj", "vision_model.embeddings.patch_embeddings.projection")
    if "img_encoder.patch_embed.norm" in name:
        name = name.replace("img_encoder.patch_embed.norm", "vision_model.embeddings.layernorm")
    if "img_encoder.layers" in name:
        name = name.replace("img_encoder.layers", "vision_model.encoder.stages")
    if "blocks" in name and "res" not in name:
        name = name.replace("blocks", "layers")
    if "attn" in name and "pre_assign" not in name:
        name = name.replace("attn", "self_attn")
    if "proj" in name and "self_attn" in name and "text" not in name:
        name = name.replace("proj", "out_proj")
    if "pre_assign_attn.attn.proj" in name:
        name = name.replace("pre_assign_attn.attn.proj", "pre_assign_attn.attn.out_proj")
    if "norm1" in name:
        name = name.replace("norm1", "layer_norm1")
    if "norm2" in name and "pre_assign" not in name:
        name = name.replace("norm2", "layer_norm2")
    if "img_encoder.norm" in name:
        name = name.replace("img_encoder.norm", "vision_model.layernorm")
    # text encoder
    if "text_encoder.token_embedding" in name:
        name = name.replace("text_encoder.token_embedding", "text_model.embeddings.token_embedding")
    if "text_encoder.positional_embedding" in name:
        name = name.replace("text_encoder.positional_embedding", "text_model.embeddings.position_embedding.weight")
    if "text_encoder.transformer.resblocks." in name:
        name = name.replace("text_encoder.transformer.resblocks.", "text_model.encoder.layers.")
    if "ln_1" in name:
        name = name.replace("ln_1", "layer_norm1")
    if "ln_2" in name:
        name = name.replace("ln_2", "layer_norm2")
    if "c_fc" in name:
        name = name.replace("c_fc", "fc1")
    if "c_proj" in name:
        name = name.replace("c_proj", "fc2")
    if "text_encoder" in name:
        name = name.replace("text_encoder", "text_model")
    if "ln_final" in name:
        name = name.replace("ln_final", "final_layer_norm")
    # projection layers
    if "img_projector.linear_hidden." in name:
        name = name.replace("img_projector.linear_hidden.", "visual_projection.")
    if "img_projector.linear_out." in name:
        name = name.replace("img_projector.linear_out.", "visual_projection.3.")
    if "text_projector.linear_hidden" in name:
        name = name.replace("text_projector.linear_hidden", "text_projection")
    if "text_projector.linear_out" in name:
        name = name.replace("text_projector.linear_out", "text_projection.3")

    return name
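# Illustrative example with a hypothetical checkpoint key:
#   rename_key("img_encoder.layers.0.blocks.1.attn.proj.weight")
#   -> "vision_model.encoder.stages.0.layers.1.self_attn.out_proj.weight"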
def convert_state_dict(orig_state_dict, config):
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)

        if "qkv" in key:
            # weights and biases of the key, value and query projections of vision encoder's attention layers
            # require special treatment: we need to split them up into separate matrices/vectors
            key_split = key.split(".")
            stage_num, layer_num = int(key_split[2]), int(key_split[4])
            dim = config.vision_config.hidden_size

            if "weight" in key:
                orig_state_dict[
                    f"vision_model.encoder.stages.{stage_num}.layers.{layer_num}.self_attn.q_proj.weight"
                ] = val[:dim, :]
                orig_state_dict[
                    f"vision_model.encoder.stages.{stage_num}.layers.{layer_num}.self_attn.k_proj.weight"
                ] = val[dim : dim * 2, :]
                orig_state_dict[
                    f"vision_model.encoder.stages.{stage_num}.layers.{layer_num}.self_attn.v_proj.weight"
                ] = val[-dim:, :]
            else:
                orig_state_dict[
                    f"vision_model.encoder.stages.{stage_num}.layers.{layer_num}.self_attn.q_proj.bias"
                ] = val[:dim]
                orig_state_dict[
                    f"vision_model.encoder.stages.{stage_num}.layers.{layer_num}.self_attn.k_proj.bias"
                ] = val[dim : dim * 2]
                orig_state_dict[
                    f"vision_model.encoder.stages.{stage_num}.layers.{layer_num}.self_attn.v_proj.bias"
                ] = val[-dim:]
        elif "in_proj" in key:
            # weights and biases of the key, value and query projections of text encoder's attention layers
            # require special treatment: we need to split them up into separate matrices/vectors
            key_split = key.split(".")
            layer_num = int(key_split[3])
            dim = config.text_config.hidden_size

            if "weight" in key:
                orig_state_dict[f"text_model.encoder.layers.{layer_num}.self_attn.q_proj.weight"] = val[:dim, :]
                orig_state_dict[f"text_model.encoder.layers.{layer_num}.self_attn.k_proj.weight"] = val[
                    dim : dim * 2, :
                ]
                orig_state_dict[f"text_model.encoder.layers.{layer_num}.self_attn.v_proj.weight"] = val[-dim:, :]
            else:
                orig_state_dict[f"text_model.encoder.layers.{layer_num}.self_attn.q_proj.bias"] = val[:dim]
                orig_state_dict[f"text_model.encoder.layers.{layer_num}.self_attn.k_proj.bias"] = val[dim : dim * 2]
                orig_state_dict[f"text_model.encoder.layers.{layer_num}.self_attn.v_proj.bias"] = val[-dim:]
        else:
            new_name = rename_key(key)
            # squeeze if necessary
            if (
                "text_projection.0" in new_name
                or "text_projection.3" in new_name
                or "visual_projection.0" in new_name
                or "visual_projection.3" in new_name
            ):
                orig_state_dict[new_name] = val.squeeze_()
            else:
                orig_state_dict[new_name] = val

    return orig_state_dict
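# Note: each fused qkv/in_proj weight of shape (3 * hidden_size, hidden_size) is split
# above into equal query/key/value blocks of shape (hidden_size, hidden_size) each.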
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_groupvit_checkpoint(checkpoint_path, pytorch_dump_folder_path, model_name="groupvit-gcc-yfcc", push_to_hub=False):
    config = GroupViTConfig()
    model = GroupViTModel(config).eval()

    state_dict = torch.load(checkpoint_path, map_location="cpu")["model"]
    new_state_dict = convert_state_dict(state_dict, config)
    missing_keys, unexpected_keys = model.load_state_dict(new_state_dict, strict=False)
    assert missing_keys == ["text_model.embeddings.position_ids"]
    assert (unexpected_keys == ["multi_label_logit_scale"]) or (len(unexpected_keys) == 0)

    # verify result
    processor = CLIPProcessor.from_pretrained("openai/clip-vit-base-patch32")
    image = prepare_img()
    inputs = processor(text=["a photo of a cat", "a photo of a dog"], images=image, padding=True, return_tensors="pt")

    with torch.no_grad():
        outputs = model(**inputs)

    if model_name == "groupvit-gcc-yfcc":
        expected_logits = torch.tensor([[13.3523, 6.3629]])
    elif model_name == "groupvit-gcc-redcaps":
        expected_logits = torch.tensor([[16.1873, 8.6230]])
    else:
        raise ValueError(f"Model name {model_name} not supported.")
    assert torch.allclose(outputs.logits_per_image, expected_logits, atol=1e-3)

    processor.save_pretrained(pytorch_dump_folder_path)
    model.save_pretrained(pytorch_dump_folder_path)
    print("Successfully saved processor and model to", pytorch_dump_folder_path)

    if push_to_hub:
        print("Pushing to the hub...")
        processor.push_to_hub(model_name, organization="nielsr")
        model.push_to_hub(model_name, organization="nielsr")
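# Usage sketch (hypothetical local paths, for illustration only):
#   convert_groupvit_checkpoint("./checkpoints/groupvit_gcc_yfcc.pth", "./groupvit-hf", "groupvit-gcc-yfcc")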
if __name__ == "__main__":
_a : int = argparse.ArgumentParser()
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to dump the processor and PyTorch model."""
)
parser.add_argument("""--checkpoint_path""", default=None, type=str, help="""Path to GroupViT checkpoint""")
parser.add_argument(
"""--model_name""",
default="""groupvit-gccy-fcc""",
type=str,
help="""Name of the model. Expecting either 'groupvit-gcc-yfcc' or 'groupvit-gcc-redcaps'""",
)
parser.add_argument(
"""--push_to_hub""",
action="""store_true""",
help="""Whether or not to push the converted model and processor to the 🤗 hub using the provided `model_name`.""",
)
_a : List[str] = parser.parse_args()
convert_groupvit_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.model_name, args.push_to_hub)
| 689 | 1 |
"""simple docstring"""
import importlib
import torch
import yaml
from omegaconf import OmegaConf
from taming.models.vqgan import VQModel
def load_config(config_path, display=False):
    config = OmegaConf.load(config_path)
    if display:
        print(yaml.dump(OmegaConf.to_container(config)))
    return config
def load_vqgan(device, conf_path=None, ckpt_path=None):
    if conf_path is None:
        conf_path = "./model_checkpoints/vqgan_only.yaml"
    config = load_config(conf_path, display=False)
    model = VQModel(**config.model.params)
    if ckpt_path is None:
        ckpt_path = "./model_checkpoints/vqgan_only.pt"
    sd = torch.load(ckpt_path, map_location=device)
    if ".ckpt" in ckpt_path:
        sd = sd["state_dict"]
    model.load_state_dict(sd, strict=True)
    model.to(device)
    del sd
    return model
def reconstruct_with_vqgan(x, model):
    z, _, _ = model.encode(x)
    print(f"VQGAN --- {model.__class__.__name__}: latent shape: {z.shape[2:]}")
    xrec = model.decode(z)
    return xrec
def get_obj_from_str(string, reload=False):
    module, cls = string.rsplit(".", 1)
    if reload:
        module_imp = importlib.import_module(module)
        importlib.reload(module_imp)
    return getattr(importlib.import_module(module, package=None), cls)
def instantiate_from_config(config):
    if "target" not in config:
        raise KeyError("Expected key `target` to instantiate.")
    return get_obj_from_str(config["target"])(**config.get("params", {}))
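# Usage sketch (hypothetical target): instantiate_from_config({"target": "torch.nn.ReLU"})
# imports torch.nn, looks up ReLU, and instantiates it with no params.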
def load_model_from_config(config, sd, gpu=True, eval_mode=True):
    model = instantiate_from_config(config)
    if sd is not None:
        model.load_state_dict(sd)
    if gpu:
        model.cuda()
    if eval_mode:
        model.eval()
    return {"model": model}
def load_model(config, ckpt, gpu, eval_mode):
    # load the specified checkpoint, or start from scratch when none is given
    if ckpt:
        pl_sd = torch.load(ckpt, map_location="cpu")
        global_step = pl_sd["global_step"]
        print(f"loaded model from global step {global_step}.")
    else:
        pl_sd = {"state_dict": None}
        global_step = None
    model = load_model_from_config(config.model, pl_sd["state_dict"], gpu=gpu, eval_mode=eval_mode)["model"]
    return model, global_step
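# Minimal usage sketch (assumes the default checkpoint paths above exist locally):
#   device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
#   vqgan = load_vqgan(device)
#   xrec = reconstruct_with_vqgan(images, vqgan)  # `images`: a hypothetical (B, C, H, W) tensor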
| 283 | """simple docstring"""
from __future__ import annotations
DIRECTIONS = [
    [-1, 0],  # left
    [0, -1],  # down
    [1, 0],  # right
    [0, 1],  # up
]
def search(
    grid: list[list[int]],
    init: list[int],
    goal: list[int],
    cost: int,
    heuristic: list[list[int]],
) -> tuple[list[list[int]], list[list[int]]]:
    closed = [
        [0 for col in range(len(grid[0]))] for row in range(len(grid))
    ]  # the reference grid
    closed[init[0]][init[1]] = 1
    action = [
        [0 for col in range(len(grid[0]))] for row in range(len(grid))
    ]  # the action grid

    x = init[0]
    y = init[1]
    g = 0
    f = g + heuristic[x][y]  # cost from starting cell to destination cell
    cell = [[f, g, x, y]]

    found = False  # flag that is set when search is complete
    resign = False  # flag set if we can't find expand

    while not found and not resign:
        if len(cell) == 0:
            raise ValueError("Algorithm is unable to find solution")
        else:  # to choose the least costliest action so as to move closer to the goal
            cell.sort()
            cell.reverse()
            next_cell = cell.pop()
            x = next_cell[2]
            y = next_cell[3]
            g = next_cell[1]

            if x == goal[0] and y == goal[1]:
                found = True
            else:
                for i in range(len(DIRECTIONS)):  # to try out different valid actions
                    xa = x + DIRECTIONS[i][0]
                    ya = y + DIRECTIONS[i][1]
                    if xa >= 0 and xa < len(grid) and ya >= 0 and ya < len(grid[0]):
                        if closed[xa][ya] == 0 and grid[xa][ya] == 0:
                            ga = g + cost
                            fa = ga + heuristic[xa][ya]
                            cell.append([fa, ga, xa, ya])
                            closed[xa][ya] = 1
                            action[xa][ya] = i
    invpath = []
    x = goal[0]
    y = goal[1]
    invpath.append([x, y])  # we get the reverse path from here
    while x != init[0] or y != init[1]:
        xa = x - DIRECTIONS[action[x][y]][0]
        ya = y - DIRECTIONS[action[x][y]][1]
        x = xa
        y = ya
        invpath.append([x, y])

    path = []
    for i in range(len(invpath)):
        path.append(invpath[len(invpath) - 1 - i])
    return path, action
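# `path` holds the [x, y] cells from init to goal in order; `action` records, for each
# visited cell, the index into DIRECTIONS of the move that reached it (used for backtracking).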
if __name__ == "__main__":
    grid = [
        [0, 1, 0, 0, 0, 0],
        [0, 1, 0, 0, 0, 0],  # 0 are free path whereas 1's are obstacles
        [0, 1, 0, 0, 0, 0],
        [0, 1, 0, 0, 1, 0],
        [0, 0, 0, 0, 1, 0],
    ]

    init = [0, 0]
    # all coordinates are given in format [y,x]
    goal = [len(grid) - 1, len(grid[0]) - 1]
    cost = 1

    # the cost map which pushes the path closer to the goal
    heuristic = [[0 for row in range(len(grid[0]))] for col in range(len(grid))]
    for i in range(len(grid)):
        for j in range(len(grid[0])):
            heuristic[i][j] = abs(i - goal[0]) + abs(j - goal[1])
            if grid[i][j] == 1:
                # added extra penalty in the heuristic map
                heuristic[i][j] = 99

    path, action = search(grid, init, goal, cost, heuristic)

    print("ACTION MAP")
    for i in range(len(action)):
        print(action[i])

    for i in range(len(path)):
        print(path[i])
| 283 | 1 |