| code stringlengths 87 55.2k | code_codestyle int64 0 349 | style_context stringlengths 135 49.1k | style_context_codestyle int64 0 349 | label int64 0 1 |
|---|---|---|---|---|
from collections import deque
class Process:
    def __init__(self, process_name: str, arrival_time: int, burst_time: int) -> None:
        """simple docstring"""
        self.process_name = process_name  # process name
        self.arrival_time = arrival_time  # arrival time of the process
        # completion time of finished process or last interrupted time
        self.stop_time = arrival_time
        self.burst_time = burst_time  # remaining burst time
        self.waiting_time = 0  # total time of the process wait in ready queue
        self.turnaround_time = 0  # time from arrival time to completion time
class MLFQ:
    def __init__(self, number_of_queues: int, time_slices: list[int], queue: deque[Process], current_time: int) -> None:
        """simple docstring"""
        self.number_of_queues = number_of_queues
        # time slice of queues that round robin algorithm applied
        self.time_slices = time_slices
        # unfinished process is in this ready_queue
        self.ready_queue = queue
        # current time
        self.current_time = current_time
        # finished process is in this sequence queue
        self.finish_queue: deque[Process] = deque()
    def calculate_sequence_of_finish_queue(self) -> list[str]:
        """simple docstring"""
        sequence = []
        for i in range(len(self.finish_queue)):
            sequence.append(self.finish_queue[i].process_name)
        return sequence
    def calculate_waiting_time(self, queue: list[Process]) -> list[int]:
        """simple docstring"""
        waiting_times = []
        for i in range(len(queue)):
            waiting_times.append(queue[i].waiting_time)
        return waiting_times
    def calculate_turnaround_time(self, queue: list[Process]) -> list[int]:
        """simple docstring"""
        turnaround_times = []
        for i in range(len(queue)):
            turnaround_times.append(queue[i].turnaround_time)
        return turnaround_times
    def calculate_completion_time(self, queue: list[Process]) -> list[int]:
        """simple docstring"""
        completion_times = []
        for i in range(len(queue)):
            completion_times.append(queue[i].stop_time)
        return completion_times
    def calculate_remaining_burst_time_of_processes(self, queue: deque[Process]) -> list[int]:
        """simple docstring"""
        return [q.burst_time for q in queue]
    def update_waiting_time(self, process: Process) -> int:
        """simple docstring"""
        process.waiting_time += self.current_time - process.stop_time
        return process.waiting_time
    def first_come_first_served(self, ready_queue: deque[Process]) -> deque[Process]:
        """simple docstring"""
        finished: deque[Process] = deque()  # sequence deque of finished process
        while len(ready_queue) != 0:
            cp = ready_queue.popleft()  # current process
            # if process's arrival time is later than current time, update current time
            if self.current_time < cp.arrival_time:
                self.current_time = cp.arrival_time
            # update waiting time of current process
            self.update_waiting_time(cp)
            # update current time
            self.current_time += cp.burst_time
            # finish the process and set the process's burst-time 0
            cp.burst_time = 0
            # set the process's turnaround time because it is finished
            cp.turnaround_time = self.current_time - cp.arrival_time
            # set the completion time
            cp.stop_time = self.current_time
            # add the process to the finished queue
            finished.append(cp)
        self.finish_queue.extend(finished)  # add finished process to finish queue
        # FCFS will finish all remaining processes
        return finished
    def round_robin(self, ready_queue: deque[Process], time_slice: int) -> tuple[deque[Process], deque[Process]]:
        """simple docstring"""
        finished: deque[Process] = deque()  # sequence deque of terminated process
        # just for 1 cycle and unfinished processes will go back to queue
        for _ in range(len(ready_queue)):
            cp = ready_queue.popleft()  # current process
            # if process's arrival time is later than current time, update current time
            if self.current_time < cp.arrival_time:
                self.current_time = cp.arrival_time
            # update waiting time of unfinished processes
            self.update_waiting_time(cp)
            # if the burst time of process is bigger than time-slice
            if cp.burst_time > time_slice:
                # use CPU for only time-slice
                self.current_time += time_slice
                # update remaining burst time
                cp.burst_time -= time_slice
                # update end point time
                cp.stop_time = self.current_time
                # locate the process behind the queue because it is not finished
                ready_queue.append(cp)
            else:
                # use CPU for remaining burst time
                self.current_time += cp.burst_time
                # set burst time 0 because the process is finished
                cp.burst_time = 0
                # set the finish time
                cp.stop_time = self.current_time
                # update the process' turnaround time because it is finished
                cp.turnaround_time = self.current_time - cp.arrival_time
                # add the process to the finished queue
                finished.append(cp)
        self.finish_queue.extend(finished)  # add finished process to finish queue
        # return finished processes queue and remaining processes queue
        return finished, ready_queue
    def multi_level_feedback_queue(self) -> deque[Process]:
        """simple docstring"""
        for i in range(self.number_of_queues - 1):
            finished, self.ready_queue = self.round_robin(
                self.ready_queue, self.time_slices[i])
        # the last queue has first_come_first_served algorithm
        self.first_come_first_served(self.ready_queue)
        return self.finish_queue
if __name__ == "__main__":
import doctest
    P1 = Process("P1", 0, 53)
    P2 = Process("P2", 0, 17)
    P3 = Process("P3", 0, 68)
    P4 = Process("P4", 0, 24)
    number_of_queues = 3
    time_slices = [17, 25]
    queue = deque([P1, P2, P3, P4])
    if len(time_slices) != number_of_queues - 1:
        raise SystemExit(0)
    doctest.testmod(extraglobs={"queue": deque([P1, P2, P3, P4])})
    P1 = Process("P1", 0, 53)
    P2 = Process("P2", 0, 17)
    P3 = Process("P3", 0, 68)
    P4 = Process("P4", 0, 24)
    number_of_queues = 3
    time_slices = [17, 25]
    queue = deque([P1, P2, P3, P4])
    mlfq = MLFQ(number_of_queues, time_slices, queue, 0)
    finish_queue = mlfq.multi_level_feedback_queue()
    # print total waiting times of processes (P1, P2, P3, P4)
    print(
        f"waiting time:\
        \t\t\t{MLFQ.calculate_waiting_time(mlfq, [P1, P2, P3, P4])}"
    )
    # print completion times of processes (P1, P2, P3, P4)
    print(
        f"completion time:\
        \t\t{MLFQ.calculate_completion_time(mlfq, [P1, P2, P3, P4])}"
    )
    # print total turnaround times of processes (P1, P2, P3, P4)
    print(
        f"turnaround time:\
        \t\t{MLFQ.calculate_turnaround_time(mlfq, [P1, P2, P3, P4])}"
    )
    # print sequence of finished processes
    print(
        f"sequence of finished processes:\
        {mlfq.calculate_sequence_of_finish_queue()}"
    )
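    # Hand-computed trace of the demo above (a sketch derived by stepping the
    # scheduler by hand, not output captured from a run): RR with slice 17
    # finishes P2 at t=34; RR with slice 25 finishes P4 at t=125; FCFS then
    # finishes P1 at t=136 and P3 at t=162. Expected results:
    #   waiting times    [P1..P4]: [83, 17, 94, 101]
    #   completion times [P1..P4]: [136, 34, 162, 125]
    #   turnaround times [P1..P4]: [136, 34, 162, 125]  (all arrival times are 0)
    #   sequence of finished processes: ['P2', 'P4', 'P1', 'P3']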
| 21 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowercase__ : int = logging.get_logger(__name__)
lowercase__ : List[Any] = {
'''EleutherAI/gpt-neox-20b''': '''https://huggingface.co/EleutherAI/gpt-neox-20b/resolve/main/config.json''',
# See all GPTNeoX models at https://huggingface.co/models?filter=gpt_neox
}
class _UpperCAmelCase ( lowerCAmelCase__):
_lowerCAmelCase : List[Any] = """gpt_neox"""
def __init__( self : List[str] , lowercase_ : str=50432 , lowercase_ : List[Any]=6144 , lowercase_ : List[Any]=44 , lowercase_ : Union[str, Any]=64 , lowercase_ : List[str]=24576 , lowercase_ : List[Any]="gelu" , lowercase_ : str=0.25 , lowercase_ : Optional[int]=10000 , lowercase_ : Optional[int]=0.0 , lowercase_ : Optional[int]=0.0 , lowercase_ : int=0.1 , lowercase_ : Tuple=2048 , lowercase_ : Union[str, Any]=0.02 , lowercase_ : List[str]=1E-5 , lowercase_ : str=True , lowercase_ : str=0 , lowercase_ : Union[str, Any]=2 , lowercase_ : List[str]=False , lowercase_ : Optional[int]=True , lowercase_ : List[Any]=None , **lowercase_ : Optional[int] , ):
super().__init__(bos_token_id=lowercase_ , eos_token_id=lowercase_ , **lowercase_ )
snake_case_ : List[str] = vocab_size
snake_case_ : Optional[Any] = max_position_embeddings
snake_case_ : str = hidden_size
snake_case_ : Dict = num_hidden_layers
snake_case_ : Dict = num_attention_heads
snake_case_ : List[Any] = intermediate_size
snake_case_ : List[Any] = hidden_act
snake_case_ : str = rotary_pct
snake_case_ : Dict = rotary_emb_base
snake_case_ : Optional[int] = attention_dropout
snake_case_ : Tuple = hidden_dropout
snake_case_ : Tuple = classifier_dropout
snake_case_ : List[str] = initializer_range
snake_case_ : Union[str, Any] = layer_norm_eps
snake_case_ : Any = use_cache
snake_case_ : Optional[int] = tie_word_embeddings
snake_case_ : Any = use_parallel_residual
snake_case_ : Union[str, Any] = rope_scaling
self._rope_scaling_validation()
if self.hidden_size % self.num_attention_heads != 0:
raise ValueError(
'''The hidden size is not divisible by the number of attention heads! Make sure to update them!''' )
def _snake_case ( self : Optional[int] ):
if self.rope_scaling is None:
return
if not isinstance(self.rope_scaling , lowercase_ ) or len(self.rope_scaling ) != 2:
raise ValueError(
'''`rope_scaling` must be a dictionary with two fields, `type` and `factor`, '''
f"got {self.rope_scaling}" )
snake_case_ : Any = self.rope_scaling.get('''type''' , lowercase_ )
snake_case_ : Union[str, Any] = self.rope_scaling.get('''factor''' , lowercase_ )
if rope_scaling_type is None or rope_scaling_type not in ["linear", "dynamic"]:
raise ValueError(
f"`rope_scaling`'s name field must be one of ['linear', 'dynamic'], got {rope_scaling_type}" )
if rope_scaling_factor is None or not isinstance(lowercase_ , lowercase_ ) or rope_scaling_factor <= 1.0:
raise ValueError(f"`rope_scaling`'s factor field must be an float > 1, got {rope_scaling_factor}" )
| 264 | 0 |
'''simple docstring'''
import json
import os
import unittest
from transformers.models.ctrl.tokenization_ctrl import VOCAB_FILES_NAMES, CTRLTokenizer
from ...test_tokenization_common import TokenizerTesterMixin
class A_ ( lowerCAmelCase_ , unittest.TestCase ):
_lowerCamelCase : str = CTRLTokenizer
_lowerCamelCase : str = False
_lowerCamelCase : str = False
def lowercase ( self : List[Any] ):
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
_UpperCAmelCase = ["adapt", "re@@", "a@@", "apt", "c@@", "t", "<unk>"]
_UpperCAmelCase = dict(zip(snake_case_ , range(len(snake_case_ ) ) ) )
_UpperCAmelCase = ["#version: 0.2", "a p", "ap t</w>", "r e", "a d", "ad apt</w>", ""]
_UpperCAmelCase = {"unk_token": "<unk>"}
_UpperCAmelCase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] )
_UpperCAmelCase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["merges_file"] )
with open(self.vocab_file , "w" , encoding="utf-8" ) as fp:
fp.write(json.dumps(snake_case_ ) + "\n" )
with open(self.merges_file , "w" , encoding="utf-8" ) as fp:
fp.write("\n".join(snake_case_ ) )
def lowercase ( self : List[Any] , **snake_case_ : str ):
kwargs.update(self.special_tokens_map )
return CTRLTokenizer.from_pretrained(self.tmpdirname , **snake_case_ )
def lowercase ( self : Dict , snake_case_ : Dict ):
_UpperCAmelCase = "adapt react readapt apt"
_UpperCAmelCase = "adapt react readapt apt"
return input_text, output_text
def lowercase ( self : str ):
_UpperCAmelCase = CTRLTokenizer(self.vocab_file , self.merges_file , **self.special_tokens_map )
_UpperCAmelCase = "adapt react readapt apt"
_UpperCAmelCase = "adapt re@@ a@@ c@@ t re@@ adapt apt".split()
_UpperCAmelCase = tokenizer.tokenize(snake_case_ )
self.assertListEqual(snake_case_ , snake_case_ )
_UpperCAmelCase = tokens + [tokenizer.unk_token]
_UpperCAmelCase = [0, 1, 2, 4, 5, 1, 0, 3, 6]
self.assertListEqual(tokenizer.convert_tokens_to_ids(snake_case_ ) , snake_case_ )
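# In short, what the test above exercises: with the toy merges list, "adapt" is
# reassembled into a single vocabulary entry via the "a d" and "ad apt</w>"
# merges, while the unseen word "react" only gets the "r e" merge and falls back
# to the pieces "re@@ a@@ c@@ t"; tokens absent from the vocab map to <unk> (id 6).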
| 22 |
"""simple docstring"""
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_pegasus import PegasusTokenizer
else:
lowercase__ : int = None
lowercase__ : Any = logging.get_logger(__name__)
lowercase__ : List[str] = '''▁'''
lowercase__ : Optional[int] = {'''vocab_file''': '''spiece.model''', '''tokenizer_file''': '''tokenizer.json'''}
lowercase__ : str = {
'''vocab_file''': {'''google/pegasus-xsum''': '''https://huggingface.co/google/pegasus-xsum/resolve/main/spiece.model'''},
'''tokenizer_file''': {
'''google/pegasus-xsum''': '''https://huggingface.co/google/pegasus-xsum/resolve/main/tokenizer.json'''
},
}
lowercase__ : List[Any] = {
'''google/pegasus-xsum''': 5_12,
}
class _UpperCAmelCase ( lowerCAmelCase__):
_lowerCAmelCase : List[str] = VOCAB_FILES_NAMES
_lowerCAmelCase : List[str] = PRETRAINED_VOCAB_FILES_MAP
_lowerCAmelCase : Any = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_lowerCAmelCase : Tuple = PegasusTokenizer
_lowerCAmelCase : str = ["""input_ids""", """attention_mask"""]
def __init__( self : Any , lowercase_ : Optional[Any]=None , lowercase_ : int=None , lowercase_ : Tuple="<pad>" , lowercase_ : int="</s>" , lowercase_ : Tuple="<unk>" , lowercase_ : str="<mask_2>" , lowercase_ : Optional[Any]="<mask_1>" , lowercase_ : str=None , lowercase_ : List[str]=103 , **lowercase_ : List[Any] , ):
snake_case_ : Dict = offset
if additional_special_tokens is not None:
if not isinstance(lowercase_ , lowercase_ ):
raise TypeError(
f"additional_special_tokens should be of type {type(lowercase_ )}, but is"
f" {type(lowercase_ )}" )
additional_special_tokens_extended = (
([mask_token_sent] + additional_special_tokens)
if mask_token_sent not in additional_special_tokens and mask_token_sent is not None
else additional_special_tokens
)
# fill additional tokens with ..., <unk_token_102> in case not all additional tokens are already taken
additional_special_tokens_extended += [
f"<unk_{i}>" for i in range(len(lowercase_ ) , self.offset - 1 )
]
if len(set(lowercase_ ) ) != len(lowercase_ ):
raise ValueError(
'''Please make sure that the provided additional_special_tokens do not contain an incorrectly'''
f" shifted list of <unk_x> tokens. Found {additional_special_tokens_extended}." )
snake_case_ : Union[str, Any] = additional_special_tokens_extended
else:
snake_case_ : Dict = [mask_token_sent] if mask_token_sent is not None else []
additional_special_tokens += [f"<unk_{i}>" for i in range(2 , self.offset )]
super().__init__(
lowercase_ , tokenizer_file=lowercase_ , pad_token=lowercase_ , eos_token=lowercase_ , unk_token=lowercase_ , mask_token=lowercase_ , mask_token_sent=lowercase_ , offset=lowercase_ , additional_special_tokens=lowercase_ , **lowercase_ , )
snake_case_ : List[Any] = vocab_file
snake_case_ : List[Any] = False if not self.vocab_file else True
def _snake_case ( self : str , lowercase_ : Union[str, Any] ):
snake_case_ : Any = set(self.all_special_ids ) # call it once instead of inside list comp
all_special_ids.remove(self.unk_token_id ) # <unk> is only sometimes special
if all_special_ids != set(range(len(self.additional_special_tokens ) + 3 ) ):
raise ValueError(
'''There should be 3 special tokens: mask_token, pad_token, and eos_token +'''
f" {len(self.additional_special_tokens )} additional_special_tokens, but got {all_special_ids}" )
return [1 if x in all_special_ids else 0 for x in seq]
def _snake_case ( self : int , lowercase_ : List , lowercase_ : Optional[List] = None , lowercase_ : bool = False ):
if already_has_special_tokens:
return self._special_token_mask(lowercase_ )
elif token_ids_a is None:
return self._special_token_mask(lowercase_ ) + [1]
else:
return self._special_token_mask(token_ids_a + token_ids_a ) + [1]
def _snake_case ( self : List[Any] , lowercase_ : Optional[int] , lowercase_ : str=None ):
if token_ids_a is None:
return token_ids_a + [self.eos_token_id]
# We don't expect to process pairs, but leave the pair logic for API consistency
return token_ids_a + token_ids_a + [self.eos_token_id]
def _snake_case ( self : Optional[Any] , lowercase_ : str , lowercase_ : Optional[str] = None ):
if not self.can_save_slow_tokenizer:
raise ValueError(
'''Your fast tokenizer does not have the necessary information to save the vocabulary for a slow '''
'''tokenizer.''' )
if not os.path.isdir(lowercase_ ):
logger.error(f"Vocabulary path ({save_directory}) should be a directory" )
return
snake_case_ : Dict = os.path.join(
lowercase_ , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(lowercase_ ):
copyfile(self.vocab_file , lowercase_ )
return (out_vocab_file,)
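# Hedged usage sketch (PegasusTokenizerFast is the public class this file
# defines; google/pegasus-xsum is its reference checkpoint):
#   from transformers import PegasusTokenizerFast
#   tok = PegasusTokenizerFast.from_pretrained("google/pegasus-xsum")
#   ids = tok("Sample text.").input_ids
#   ids[-1] == tok.eos_token_id  # build_inputs_with_special_tokens appends </s>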
| 264 | 0 |
'''simple docstring'''
from math import factorial
DIGIT_FACTORIAL = {str(d): factorial(d) for d in range(10)}
def sum_of_digit_factorial(n: int) -> int:
    return sum(DIGIT_FACTORIAL[d] for d in str(n))
def solution() -> int:
    limit = 7 * factorial(9) + 1
    return sum(i for i in range(3, limit) if sum_of_digit_factorial(i) == i)
if __name__ == "__main__":
print(F"{solution() = }")
| 23 |
"""simple docstring"""
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_LIST,
OpenAIGPTConfig,
OpenAIGPTDoubleHeadsModel,
OpenAIGPTForSequenceClassification,
OpenAIGPTLMHeadModel,
OpenAIGPTModel,
)
class _UpperCAmelCase :
def __init__( self : Union[str, Any] , lowercase_ : List[Any] , lowercase_ : int=13 , lowercase_ : Optional[int]=7 , lowercase_ : Any=True , lowercase_ : Dict=True , lowercase_ : Dict=True , lowercase_ : Optional[Any]=99 , lowercase_ : Union[str, Any]=32 , lowercase_ : str=5 , lowercase_ : Union[str, Any]=4 , lowercase_ : Any=37 , lowercase_ : Tuple="gelu" , lowercase_ : Dict=0.1 , lowercase_ : Tuple=0.1 , lowercase_ : Optional[int]=512 , lowercase_ : Optional[Any]=16 , lowercase_ : Optional[Any]=2 , lowercase_ : Optional[Any]=0.02 , lowercase_ : List[Any]=3 , lowercase_ : Union[str, Any]=4 , lowercase_ : List[Any]=None , ):
snake_case_ : Any = parent
snake_case_ : List[str] = batch_size
snake_case_ : List[Any] = seq_length
snake_case_ : Optional[int] = is_training
snake_case_ : Union[str, Any] = use_token_type_ids
snake_case_ : Optional[Any] = use_labels
snake_case_ : Union[str, Any] = vocab_size
snake_case_ : Any = hidden_size
snake_case_ : List[Any] = num_hidden_layers
snake_case_ : Any = num_attention_heads
snake_case_ : Dict = intermediate_size
snake_case_ : Union[str, Any] = hidden_act
snake_case_ : Optional[int] = hidden_dropout_prob
snake_case_ : Optional[Any] = attention_probs_dropout_prob
snake_case_ : Tuple = max_position_embeddings
snake_case_ : int = type_vocab_size
snake_case_ : Tuple = type_sequence_label_size
snake_case_ : str = initializer_range
snake_case_ : Tuple = num_labels
snake_case_ : str = num_choices
snake_case_ : Any = scope
snake_case_ : Dict = self.vocab_size - 1
def _snake_case ( self : int ):
snake_case_ : Optional[Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
snake_case_ : Optional[Any] = None
if self.use_token_type_ids:
snake_case_ : Tuple = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
snake_case_ : str = None
snake_case_ : Dict = None
snake_case_ : str = None
if self.use_labels:
snake_case_ : Optional[int] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
snake_case_ : List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
snake_case_ : Tuple = ids_tensor([self.batch_size] , self.num_choices )
snake_case_ : int = OpenAIGPTConfig(
vocab_size=self.vocab_size , n_embd=self.hidden_size , n_layer=self.num_hidden_layers , n_head=self.num_attention_heads , n_positions=self.max_position_embeddings , pad_token_id=self.pad_token_id , )
snake_case_ : Any = ids_tensor([self.num_hidden_layers, self.num_attention_heads] , 2 )
return (
config,
input_ids,
head_mask,
token_type_ids,
sequence_labels,
token_labels,
choice_labels,
)
def _snake_case ( self : Tuple , lowercase_ : Any , lowercase_ : Union[str, Any] , lowercase_ : str , lowercase_ : Dict , *lowercase_ : Dict ):
snake_case_ : List[Any] = OpenAIGPTModel(config=lowercase_ )
model.to(lowercase_ )
model.eval()
snake_case_ : Any = model(lowercase_ , token_type_ids=lowercase_ , head_mask=lowercase_ )
snake_case_ : Optional[Any] = model(lowercase_ , token_type_ids=lowercase_ )
snake_case_ : Optional[Any] = model(lowercase_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def _snake_case ( self : Tuple , lowercase_ : Dict , lowercase_ : str , lowercase_ : Optional[Any] , lowercase_ : List[Any] , *lowercase_ : Optional[Any] ):
snake_case_ : Union[str, Any] = OpenAIGPTLMHeadModel(lowercase_ )
model.to(lowercase_ )
model.eval()
snake_case_ : Union[str, Any] = model(lowercase_ , token_type_ids=lowercase_ , labels=lowercase_ )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def _snake_case ( self : List[str] , lowercase_ : Dict , lowercase_ : List[str] , lowercase_ : Any , lowercase_ : Dict , *lowercase_ : Union[str, Any] ):
snake_case_ : Tuple = OpenAIGPTDoubleHeadsModel(lowercase_ )
model.to(lowercase_ )
model.eval()
snake_case_ : Dict = model(lowercase_ , token_type_ids=lowercase_ , labels=lowercase_ )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def _snake_case ( self : Any , lowercase_ : str , lowercase_ : List[str] , lowercase_ : Optional[Any] , lowercase_ : Optional[Any] , *lowercase_ : Any ):
snake_case_ : int = self.num_labels
snake_case_ : Any = OpenAIGPTForSequenceClassification(lowercase_ )
model.to(lowercase_ )
model.eval()
snake_case_ : Optional[int] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
snake_case_ : Optional[Any] = model(lowercase_ , token_type_ids=lowercase_ , labels=lowercase_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def _snake_case ( self : int ):
snake_case_ : Dict = self.prepare_config_and_inputs()
(
(
snake_case_
), (
snake_case_
), (
snake_case_
), (
snake_case_
), (
snake_case_
), (
snake_case_
), (
snake_case_
),
) : str = config_and_inputs
snake_case_ : str = {
'''input_ids''': input_ids,
'''token_type_ids''': token_type_ids,
'''head_mask''': head_mask,
}
return config, inputs_dict
@require_torch
class _UpperCAmelCase ( lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , unittest.TestCase):
_lowerCAmelCase : Dict = (
(OpenAIGPTModel, OpenAIGPTLMHeadModel, OpenAIGPTDoubleHeadsModel, OpenAIGPTForSequenceClassification)
if is_torch_available()
else ()
)
_lowerCAmelCase : int = (
(OpenAIGPTLMHeadModel,) if is_torch_available() else ()
) # TODO (PVP): Add Double HeadsModel when generate() function is changed accordingly
_lowerCAmelCase : Union[str, Any] = (
{
"""feature-extraction""": OpenAIGPTModel,
"""text-classification""": OpenAIGPTForSequenceClassification,
"""text-generation""": OpenAIGPTLMHeadModel,
"""zero-shot""": OpenAIGPTForSequenceClassification,
}
if is_torch_available()
else {}
)
def _snake_case ( self : Tuple , lowercase_ : Optional[int] , lowercase_ : int , lowercase_ : List[Any] , lowercase_ : List[Any] , lowercase_ : Union[str, Any] ):
if pipeline_test_casse_name == "ZeroShotClassificationPipelineTests":
# Get `tokenizer does not have a padding token` error for both fast/slow tokenizers.
# `OpenAIGPTConfig` was never used in pipeline tests, either because of a missing checkpoint or because a
# tiny config could not be created.
return True
return False
def _snake_case ( self : Optional[int] , lowercase_ : List[Any] , lowercase_ : Optional[int] , lowercase_ : List[str]=False ):
snake_case_ : Dict = super()._prepare_for_class(lowercase_ , lowercase_ , return_labels=lowercase_ )
if return_labels:
if model_class.__name__ == "OpenAIGPTDoubleHeadsModel":
snake_case_ : List[str] = torch.zeros(
(self.model_tester.batch_size, self.model_tester.num_choices, self.model_tester.seq_length) , dtype=torch.long , device=lowercase_ , )
snake_case_ : int = inputs_dict['''labels''']
snake_case_ : Optional[Any] = inputs_dict['''labels''']
snake_case_ : int = torch.zeros(
(self.model_tester.batch_size, self.model_tester.num_choices) , dtype=torch.long , device=lowercase_ , )
snake_case_ : Tuple = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=lowercase_ )
return inputs_dict
def _snake_case ( self : Any ):
snake_case_ : List[str] = OpenAIGPTModelTester(self )
snake_case_ : Dict = ConfigTester(self , config_class=lowercase_ , n_embd=37 )
def _snake_case ( self : List[str] ):
self.config_tester.run_common_tests()
def _snake_case ( self : Optional[Any] ):
snake_case_ : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_openai_gpt_model(*lowercase_ )
def _snake_case ( self : List[str] ):
snake_case_ : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_lm_head_model(*lowercase_ )
def _snake_case ( self : int ):
snake_case_ : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_double_lm_head_model(*lowercase_ )
def _snake_case ( self : List[str] ):
snake_case_ : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_openai_gpt_for_sequence_classification(*lowercase_ )
@slow
def _snake_case ( self : Dict ):
for model_name in OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
snake_case_ : Optional[Any] = OpenAIGPTModel.from_pretrained(lowercase_ )
self.assertIsNotNone(lowercase_ )
@require_torch
class _UpperCAmelCase ( unittest.TestCase):
@slow
def _snake_case ( self : Optional[int] ):
snake_case_ : Optional[Any] = OpenAIGPTLMHeadModel.from_pretrained('''openai-gpt''' )
model.to(lowercase_ )
snake_case_ : List[str] = torch.tensor([[481, 4735, 544]] , dtype=torch.long , device=lowercase_ ) # the president is
snake_case_ : List[Any] = [
481,
4735,
544,
246,
963,
870,
762,
239,
244,
40477,
244,
249,
719,
881,
487,
544,
240,
244,
603,
481,
] # the president is a very good man. " \n " i\'m sure he is, " said the
snake_case_ : Optional[Any] = model.generate(lowercase_ , do_sample=lowercase_ )
self.assertListEqual(output_ids[0].tolist() , lowercase_ )
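# Standalone equivalent of the integration test above (hedged sketch; the
# classes and the "openai-gpt" checkpoint are the real transformers ones):
#   from transformers import OpenAIGPTLMHeadModel, OpenAIGPTTokenizer
#   tok = OpenAIGPTTokenizer.from_pretrained("openai-gpt")
#   model = OpenAIGPTLMHeadModel.from_pretrained("openai-gpt")
#   ids = tok("the president is", return_tensors="pt").input_ids  # [[481, 4735, 544]]
#   model.generate(ids, do_sample=False)  # greedy decoding, as asserted above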
| 264 | 0 |
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
from .config import config_command_parser
from .config_args import default_config_file, load_config_from_file # noqa: F401
from .default import default_command_parser
from .update import update_command_parser
def get_config_parser(subparsers=None):
    parent_parser = argparse.ArgumentParser(add_help=False, allow_abbrev=False)
    # The main config parser
    config_parser = config_command_parser(subparsers)
    # The subparser to add commands to
    subcommands = config_parser.add_subparsers(title='''subcommands''', dest='''subcommand''')
    # Then add other parsers with the parent parser
    default_command_parser(subcommands, parents=[parent_parser])
    update_command_parser(subcommands, parents=[parent_parser])
    return config_parser
def main():
    config_parser = get_config_parser()
    args = config_parser.parse_args()
    if not hasattr(args, '''func'''):
        config_parser.print_help()
        exit(1)
    # Run
    args.func(args)
if __name__ == "__main__":
main()
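# Hedged note: in the accelerate package this module backs the `accelerate
# config` CLI; judging by the modules imported above, the two extra parsers
# presumably register `default` and `update` subcommands, e.g.
# `accelerate config update --config_file <path>`.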
| 24 |
"""simple docstring"""
from typing import Dict, List, Optional, Tuple, Union
import torch
from ...models import AutoencoderKL, Transformer2DModel
from ...schedulers import KarrasDiffusionSchedulers
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
class _UpperCAmelCase ( lowerCAmelCase__):
def __init__( self : Any , lowercase_ : Transformer2DModel , lowercase_ : AutoencoderKL , lowercase_ : KarrasDiffusionSchedulers , lowercase_ : Optional[Dict[int, str]] = None , ):
super().__init__()
self.register_modules(transformer=lowercase_ , vae=lowercase_ , scheduler=lowercase_ )
# create a imagenet -> id dictionary for easier use
snake_case_ : Tuple = {}
if idalabel is not None:
for key, value in idalabel.items():
for label in value.split(''',''' ):
snake_case_ : str = int(lowercase_ )
snake_case_ : Any = dict(sorted(self.labels.items() ) )
def _snake_case ( self : List[Any] , lowercase_ : Union[str, List[str]] ):
if not isinstance(lowercase_ , lowercase_ ):
snake_case_ : Tuple = list(lowercase_ )
for l in label:
if l not in self.labels:
raise ValueError(
f"{l} does not exist. Please make sure to select one of the following labels: \n {self.labels}." )
return [self.labels[l] for l in label]
@torch.no_grad()
def __call__( self : Optional[int] , lowercase_ : List[int] , lowercase_ : float = 4.0 , lowercase_ : Optional[Union[torch.Generator, List[torch.Generator]]] = None , lowercase_ : int = 50 , lowercase_ : Optional[str] = "pil" , lowercase_ : bool = True , ):
snake_case_ : Any = len(lowercase_ )
snake_case_ : List[str] = self.transformer.config.sample_size
snake_case_ : Union[str, Any] = self.transformer.config.in_channels
snake_case_ : str = randn_tensor(
shape=(batch_size, latent_channels, latent_size, latent_size) , generator=lowercase_ , device=self.device , dtype=self.transformer.dtype , )
snake_case_ : Optional[Any] = torch.cat([latents] * 2 ) if guidance_scale > 1 else latents
snake_case_ : Optional[int] = torch.tensor(lowercase_ , device=self.device ).reshape(-1 )
snake_case_ : Dict = torch.tensor([1000] * batch_size , device=self.device )
snake_case_ : Tuple = torch.cat([class_labels, class_null] , 0 ) if guidance_scale > 1 else class_labels
# set step values
self.scheduler.set_timesteps(lowercase_ )
for t in self.progress_bar(self.scheduler.timesteps ):
if guidance_scale > 1:
snake_case_ : List[Any] = latent_model_input[: len(lowercase_ ) // 2]
snake_case_ : Union[str, Any] = torch.cat([half, half] , dim=0 )
snake_case_ : Optional[Any] = self.scheduler.scale_model_input(lowercase_ , lowercase_ )
snake_case_ : int = t
if not torch.is_tensor(lowercase_ ):
# TODO: this requires sync between CPU and GPU. So try to pass timesteps as tensors if you can
# This would be a good case for the `match` statement (Python 3.10+)
snake_case_ : Tuple = latent_model_input.device.type == '''mps'''
if isinstance(lowercase_ , lowercase_ ):
snake_case_ : List[str] = torch.float32 if is_mps else torch.float64
else:
snake_case_ : Optional[int] = torch.int32 if is_mps else torch.int64
snake_case_ : List[Any] = torch.tensor([timesteps] , dtype=lowercase_ , device=latent_model_input.device )
elif len(timesteps.shape ) == 0:
snake_case_ : str = timesteps[None].to(latent_model_input.device )
# broadcast to batch dimension in a way that's compatible with ONNX/Core ML
snake_case_ : Tuple = timesteps.expand(latent_model_input.shape[0] )
# predict noise model_output
snake_case_ : List[Any] = self.transformer(
lowercase_ , timestep=lowercase_ , class_labels=lowercase_ ).sample
# perform guidance
if guidance_scale > 1:
snake_case_, snake_case_ : Dict = noise_pred[:, :latent_channels], noise_pred[:, latent_channels:]
snake_case_, snake_case_ : Any = torch.split(lowercase_ , len(lowercase_ ) // 2 , dim=0 )
snake_case_ : int = uncond_eps + guidance_scale * (cond_eps - uncond_eps)
snake_case_ : str = torch.cat([half_eps, half_eps] , dim=0 )
snake_case_ : List[Any] = torch.cat([eps, rest] , dim=1 )
# learned sigma
if self.transformer.config.out_channels // 2 == latent_channels:
snake_case_, snake_case_ : Optional[Any] = torch.split(lowercase_ , lowercase_ , dim=1 )
else:
snake_case_ : List[str] = noise_pred
# compute previous image: x_t -> x_t-1
snake_case_ : int = self.scheduler.step(lowercase_ , lowercase_ , lowercase_ ).prev_sample
if guidance_scale > 1:
snake_case_, snake_case_ : Optional[Any] = latent_model_input.chunk(2 , dim=0 )
else:
snake_case_ : Dict = latent_model_input
snake_case_ : Union[str, Any] = 1 / self.vae.config.scaling_factor * latents
snake_case_ : Tuple = self.vae.decode(lowercase_ ).sample
snake_case_ : str = (samples / 2 + 0.5).clamp(0 , 1 )
# we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
snake_case_ : Union[str, Any] = samples.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
if output_type == "pil":
snake_case_ : Union[str, Any] = self.numpy_to_pil(lowercase_ )
if not return_dict:
return (samples,)
return ImagePipelineOutput(images=lowercase_ )
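# Hedged usage sketch — in diffusers this pipeline is DiTPipeline, with
# facebook/DiT-XL-2-256 as its reference checkpoint:
#   import torch
#   from diffusers import DiTPipeline
#   pipe = DiTPipeline.from_pretrained("facebook/DiT-XL-2-256", torch_dtype=torch.float16).to("cuda")
#   ids = pipe.get_label_ids(["white shark"])  # the label-lookup method defined above
#   image = pipe(class_labels=ids, guidance_scale=4.0, num_inference_steps=25).images[0]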
| 264 | 0 |
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
UpperCAmelCase__ : Optional[int] = logging.get_logger(__name__)
UpperCAmelCase__ : List[Any] = {
'kssteven/ibert-roberta-base': 'https://huggingface.co/kssteven/ibert-roberta-base/resolve/main/config.json',
'kssteven/ibert-roberta-large': 'https://huggingface.co/kssteven/ibert-roberta-large/resolve/main/config.json',
'kssteven/ibert-roberta-large-mnli': (
'https://huggingface.co/kssteven/ibert-roberta-large-mnli/resolve/main/config.json'
),
}
class lowerCAmelCase_ (a__ ):
"""simple docstring"""
__UpperCamelCase : Optional[int] = '''ibert'''
def __init__(self , SCREAMING_SNAKE_CASE__=3_05_22 , SCREAMING_SNAKE_CASE__=7_68 , SCREAMING_SNAKE_CASE__=12 , SCREAMING_SNAKE_CASE__=12 , SCREAMING_SNAKE_CASE__=30_72 , SCREAMING_SNAKE_CASE__="gelu" , SCREAMING_SNAKE_CASE__=0.1 , SCREAMING_SNAKE_CASE__=0.1 , SCREAMING_SNAKE_CASE__=5_12 , SCREAMING_SNAKE_CASE__=2 , SCREAMING_SNAKE_CASE__=0.02 , SCREAMING_SNAKE_CASE__=1E-12 , SCREAMING_SNAKE_CASE__=1 , SCREAMING_SNAKE_CASE__=0 , SCREAMING_SNAKE_CASE__=2 , SCREAMING_SNAKE_CASE__="absolute" , SCREAMING_SNAKE_CASE__=False , SCREAMING_SNAKE_CASE__="none" , **SCREAMING_SNAKE_CASE__ , ) -> int:
"""simple docstring"""
super().__init__(pad_token_id=SCREAMING_SNAKE_CASE__ , bos_token_id=SCREAMING_SNAKE_CASE__ , eos_token_id=SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = vocab_size
SCREAMING_SNAKE_CASE__ : List[str] = hidden_size
SCREAMING_SNAKE_CASE__ : List[Any] = num_hidden_layers
SCREAMING_SNAKE_CASE__ : Tuple = num_attention_heads
SCREAMING_SNAKE_CASE__ : Optional[int] = hidden_act
SCREAMING_SNAKE_CASE__ : Dict = intermediate_size
SCREAMING_SNAKE_CASE__ : Dict = hidden_dropout_prob
SCREAMING_SNAKE_CASE__ : int = attention_probs_dropout_prob
SCREAMING_SNAKE_CASE__ : int = max_position_embeddings
SCREAMING_SNAKE_CASE__ : int = type_vocab_size
SCREAMING_SNAKE_CASE__ : List[str] = initializer_range
SCREAMING_SNAKE_CASE__ : Union[str, Any] = layer_norm_eps
SCREAMING_SNAKE_CASE__ : Optional[Any] = position_embedding_type
SCREAMING_SNAKE_CASE__ : Optional[int] = quant_mode
SCREAMING_SNAKE_CASE__ : Dict = force_dequant
class lowerCAmelCase_ (a__ ):
"""simple docstring"""
@property
def __magic_name__ (self ) -> Mapping[str, Mapping[int, str]]:
"""simple docstring"""
if self.task == "multiple-choice":
SCREAMING_SNAKE_CASE__ : Optional[Any] = {0: """batch""", 1: """choice""", 2: """sequence"""}
else:
SCREAMING_SNAKE_CASE__ : Dict = {0: """batch""", 1: """sequence"""}
return OrderedDict(
[
("""input_ids""", dynamic_axis),
("""attention_mask""", dynamic_axis),
] )
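# Hedged note: this mirrors transformers' IBertConfig. quant_mode=True switches
# the model to I-BERT's integer-only arithmetic, and force_dequant selectively
# turns quantization back off for the named ops:
#   from transformers import IBertConfig
#   config = IBertConfig(quant_mode=True, force_dequant="none")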
| 25 |
"""simple docstring"""
import copy
import os
import cv2 as cva  # OpenCV; this dump garbles "cv2" to "cva", keep the alias for the calls below
import numpy as np
from matplotlib import pyplot as plt
class _UpperCAmelCase :
def __init__( self : List[Any] ):
snake_case_ : List[str] = ''''''
snake_case_ : Tuple = ''''''
snake_case_ : int = []
snake_case_ : Optional[int] = 0
snake_case_ : Optional[Any] = 256
snake_case_ : Tuple = 0
snake_case_ : Tuple = 0
snake_case_ : Optional[Any] = 0
snake_case_ : Any = 0
def _snake_case ( self : Optional[Any] , lowercase_ : List[Any] ):
snake_case_ : List[Any] = cva.imread(lowercase_ , 0 )
snake_case_ : Tuple = copy.deepcopy(self.img )
snake_case_, snake_case_, snake_case_ : List[Any] = plt.hist(self.img.ravel() , 256 , [0, 256] , label='''x''' )
snake_case_ : str = np.sum(lowercase_ )
for i in range(len(lowercase_ ) ):
snake_case_ : Optional[Any] = x[i] / self.k
self.sk += prk
snake_case_ : Any = (self.L - 1) * self.sk
if self.rem != 0:
self.rem = last % 1  # fractional part of the scaled level, used below to round half up
snake_case_ : Union[str, Any] = int(last + 1 if self.rem >= 0.5 else last )
self.last_list.append(lowercase_ )
snake_case_ : int = int(np.ma.count(self.img ) / self.img[1].size )
snake_case_ : Tuple = self.img[1].size
for i in range(self.number_of_cols ):
for j in range(self.number_of_rows ):
snake_case_ : Union[str, Any] = self.img[j][i]
if num != self.last_list[num]:
snake_case_ : List[str] = self.last_list[num]
cva.imwrite('''output_data/output.jpg''' , self.img )
def _snake_case ( self : Tuple ):
plt.hist(self.img.ravel() , 256 , [0, 256] )
def _snake_case ( self : int ):
cva.imshow('''Output-Image''' , self.img )
cva.imshow('''Input-Image''' , self.original_image )
cva.waitKey(5000 )
cva.destroyAllWindows()
if __name__ == "__main__":
lowercase__ : Any = os.path.join(os.path.basename(__file__), '''image_data/input.jpg''')
lowercase__ : Any = ConstantStretch()
stretcher.stretch(file_path)
stretcher.plot_histogram()
stretcher.show_image()
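# Note: despite the "stretch" naming in the demo, the class above implements
# classic histogram equalization: each gray level k is remapped through the
# cumulative distribution, s_k = (L - 1) * sum_{j <= k} p_j, with L = 256 levels.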
| 264 | 0 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_snake_case = logging.get_logger(__name__)
_snake_case = {
"uclanlp/visualbert-vqa": "https://huggingface.co/uclanlp/visualbert-vqa/resolve/main/config.json",
"uclanlp/visualbert-vqa-pre": "https://huggingface.co/uclanlp/visualbert-vqa-pre/resolve/main/config.json",
"uclanlp/visualbert-vqa-coco-pre": (
"https://huggingface.co/uclanlp/visualbert-vqa-coco-pre/resolve/main/config.json"
),
"uclanlp/visualbert-vcr": "https://huggingface.co/uclanlp/visualbert-vcr/resolve/main/config.json",
"uclanlp/visualbert-vcr-pre": "https://huggingface.co/uclanlp/visualbert-vcr-pre/resolve/main/config.json",
"uclanlp/visualbert-vcr-coco-pre": (
"https://huggingface.co/uclanlp/visualbert-vcr-coco-pre/resolve/main/config.json"
),
"uclanlp/visualbert-nlvr2": "https://huggingface.co/uclanlp/visualbert-nlvr2/resolve/main/config.json",
"uclanlp/visualbert-nlvr2-pre": "https://huggingface.co/uclanlp/visualbert-nlvr2-pre/resolve/main/config.json",
"uclanlp/visualbert-nlvr2-coco-pre": (
"https://huggingface.co/uclanlp/visualbert-nlvr2-coco-pre/resolve/main/config.json"
)
# See all VisualBERT models at https://huggingface.co/models?filter=visual_bert
}
class lowercase ( UpperCamelCase__ ):
_a = "visual_bert"
def __init__( self , _a=3_0522 , _a=768 , _a=512 , _a=12 , _a=12 , _a=3072 , _a="gelu" , _a=0.1 , _a=0.1 , _a=512 , _a=2 , _a=0.02 , _a=1e-12 , _a=False , _a=True , _a=1 , _a=0 , _a=2 , **_a , ) -> Tuple:
super().__init__(pad_token_id=_a , bos_token_id=_a , eos_token_id=_a , **_a )
_A : int = vocab_size
_A : Dict = max_position_embeddings
_A : Optional[Any] = hidden_size
_A : List[Any] = visual_embedding_dim
_A : Optional[Any] = num_hidden_layers
_A : Tuple = num_attention_heads
_A : str = intermediate_size
_A : Dict = hidden_act
_A : Union[str, Any] = hidden_dropout_prob
_A : Optional[Any] = attention_probs_dropout_prob
_A : Optional[int] = initializer_range
_A : List[Any] = type_vocab_size
_A : int = layer_norm_eps
_A : Optional[int] = bypass_transformer
_A : List[Any] = special_visual_initialize
| 26 |
"""simple docstring"""
import shutil
import tempfile
import unittest
from unittest.mock import patch
from transformers import (
DefaultFlowCallback,
IntervalStrategy,
PrinterCallback,
ProgressCallback,
Trainer,
TrainerCallback,
TrainingArguments,
is_torch_available,
)
from transformers.testing_utils import require_torch
if is_torch_available():
from transformers.trainer import DEFAULT_CALLBACKS
from .test_trainer import RegressionDataset, RegressionModelConfig, RegressionPreTrainedModel
class _UpperCAmelCase ( lowerCAmelCase__):
def __init__( self : Optional[int] ):
snake_case_ : str = []
def _snake_case ( self : List[Any] , lowercase_ : Any , lowercase_ : Union[str, Any] , lowercase_ : List[str] , **lowercase_ : Tuple ):
self.events.append('''on_init_end''' )
def _snake_case ( self : List[Any] , lowercase_ : str , lowercase_ : Optional[int] , lowercase_ : List[str] , **lowercase_ : List[str] ):
self.events.append('''on_train_begin''' )
def _snake_case ( self : Any , lowercase_ : List[str] , lowercase_ : Tuple , lowercase_ : List[Any] , **lowercase_ : Optional[int] ):
self.events.append('''on_train_end''' )
def _snake_case ( self : str , lowercase_ : Optional[int] , lowercase_ : int , lowercase_ : Optional[Any] , **lowercase_ : List[Any] ):
self.events.append('''on_epoch_begin''' )
def _snake_case ( self : Tuple , lowercase_ : List[str] , lowercase_ : Dict , lowercase_ : Union[str, Any] , **lowercase_ : Optional[Any] ):
self.events.append('''on_epoch_end''' )
def _snake_case ( self : List[str] , lowercase_ : Optional[Any] , lowercase_ : Optional[Any] , lowercase_ : int , **lowercase_ : Optional[Any] ):
self.events.append('''on_step_begin''' )
def _snake_case ( self : int , lowercase_ : int , lowercase_ : Union[str, Any] , lowercase_ : List[Any] , **lowercase_ : List[str] ):
self.events.append('''on_step_end''' )
def _snake_case ( self : str , lowercase_ : int , lowercase_ : Dict , lowercase_ : List[str] , **lowercase_ : List[str] ):
self.events.append('''on_evaluate''' )
def _snake_case ( self : Dict , lowercase_ : Union[str, Any] , lowercase_ : Any , lowercase_ : List[Any] , **lowercase_ : str ):
self.events.append('''on_predict''' )
def _snake_case ( self : List[Any] , lowercase_ : Union[str, Any] , lowercase_ : List[Any] , lowercase_ : int , **lowercase_ : Union[str, Any] ):
self.events.append('''on_save''' )
def _snake_case ( self : str , lowercase_ : Tuple , lowercase_ : Optional[int] , lowercase_ : List[str] , **lowercase_ : Any ):
self.events.append('''on_log''' )
def _snake_case ( self : Dict , lowercase_ : Optional[int] , lowercase_ : List[str] , lowercase_ : Union[str, Any] , **lowercase_ : Optional[int] ):
self.events.append('''on_prediction_step''' )
@require_torch
class _UpperCAmelCase ( unittest.TestCase):
def _snake_case ( self : List[str] ):
snake_case_ : Tuple = tempfile.mkdtemp()
def _snake_case ( self : Tuple ):
shutil.rmtree(self.output_dir )
def _snake_case ( self : int , lowercase_ : Union[str, Any]=0 , lowercase_ : Dict=0 , lowercase_ : List[str]=64 , lowercase_ : Union[str, Any]=64 , lowercase_ : Union[str, Any]=None , lowercase_ : Any=False , **lowercase_ : List[Any] ):
# disable_tqdm in TrainingArguments has a flaky default since it depends on the level of logging. We make sure
# its set to False since the tests later on depend on its value.
snake_case_ : int = RegressionDataset(length=lowercase_ )
snake_case_ : Any = RegressionDataset(length=lowercase_ )
snake_case_ : int = RegressionModelConfig(a=lowercase_ , b=lowercase_ )
snake_case_ : Tuple = RegressionPreTrainedModel(lowercase_ )
snake_case_ : Any = TrainingArguments(self.output_dir , disable_tqdm=lowercase_ , report_to=[] , **lowercase_ )
return Trainer(
lowercase_ , lowercase_ , train_dataset=lowercase_ , eval_dataset=lowercase_ , callbacks=lowercase_ , )
def _snake_case ( self : Optional[int] , lowercase_ : Any , lowercase_ : List[Any] ):
self.assertEqual(len(lowercase_ ) , len(lowercase_ ) )
# Order doesn't matter
snake_case_ : Any = sorted(lowercase_ , key=lambda lowercase_ : cb.__name__ if isinstance(lowercase_ , lowercase_ ) else cb.__class__.__name__ )
snake_case_ : List[str] = sorted(lowercase_ , key=lambda lowercase_ : cb.__name__ if isinstance(lowercase_ , lowercase_ ) else cb.__class__.__name__ )
for cba, cba in zip(lowercase_ , lowercase_ ):
if isinstance(lowercase_ , lowercase_ ) and isinstance(lowercase_ , lowercase_ ):
self.assertEqual(lowercase_ , lowercase_ )
elif isinstance(lowercase_ , lowercase_ ) and not isinstance(lowercase_ , lowercase_ ):
self.assertEqual(lowercase_ , cba.__class__ )
elif not isinstance(lowercase_ , lowercase_ ) and isinstance(lowercase_ , lowercase_ ):
self.assertEqual(cba.__class__ , lowercase_ )
else:
self.assertEqual(lowercase_ , lowercase_ )
def _snake_case ( self : Optional[Any] , lowercase_ : Tuple ):
snake_case_ : Tuple = ['''on_init_end''', '''on_train_begin''']
snake_case_ : List[Any] = 0
snake_case_ : Union[str, Any] = len(trainer.get_eval_dataloader() )
snake_case_ : List[Any] = ['''on_prediction_step'''] * len(trainer.get_eval_dataloader() ) + ['''on_log''', '''on_evaluate''']
for _ in range(trainer.state.num_train_epochs ):
expected_events.append('''on_epoch_begin''' )
for _ in range(lowercase_ ):
step += 1
expected_events += ["on_step_begin", "on_step_end"]
if step % trainer.args.logging_steps == 0:
expected_events.append('''on_log''' )
if trainer.args.evaluation_strategy == IntervalStrategy.STEPS and step % trainer.args.eval_steps == 0:
expected_events += evaluation_events.copy()
if step % trainer.args.save_steps == 0:
expected_events.append('''on_save''' )
expected_events.append('''on_epoch_end''' )
if trainer.args.evaluation_strategy == IntervalStrategy.EPOCH:
expected_events += evaluation_events.copy()
expected_events += ["on_log", "on_train_end"]
return expected_events
def _snake_case ( self : List[str] ):
snake_case_ : Union[str, Any] = self.get_trainer()
snake_case_ : Dict = DEFAULT_CALLBACKS.copy() + [ProgressCallback]
self.check_callbacks_equality(trainer.callback_handler.callbacks , lowercase_ )
# Callbacks passed at init are added to the default callbacks
snake_case_ : Optional[Any] = self.get_trainer(callbacks=[MyTestTrainerCallback] )
expected_callbacks.append(lowercase_ )
self.check_callbacks_equality(trainer.callback_handler.callbacks , lowercase_ )
# TrainingArguments.disable_tqdm controls if use ProgressCallback or PrinterCallback
snake_case_ : Optional[int] = self.get_trainer(disable_tqdm=lowercase_ )
snake_case_ : List[Any] = DEFAULT_CALLBACKS.copy() + [PrinterCallback]
self.check_callbacks_equality(trainer.callback_handler.callbacks , lowercase_ )
def _snake_case ( self : int ):
snake_case_ : int = DEFAULT_CALLBACKS.copy() + [ProgressCallback]
snake_case_ : List[Any] = self.get_trainer()
# We can add, pop, or remove by class name
trainer.remove_callback(lowercase_ )
expected_callbacks.remove(lowercase_ )
self.check_callbacks_equality(trainer.callback_handler.callbacks , lowercase_ )
snake_case_ : Dict = self.get_trainer()
snake_case_ : Optional[int] = trainer.pop_callback(lowercase_ )
self.assertEqual(cb.__class__ , lowercase_ )
self.check_callbacks_equality(trainer.callback_handler.callbacks , lowercase_ )
trainer.add_callback(lowercase_ )
expected_callbacks.insert(0 , lowercase_ )
self.check_callbacks_equality(trainer.callback_handler.callbacks , lowercase_ )
# We can also add, pop, or remove by instance
snake_case_ : Optional[int] = self.get_trainer()
snake_case_ : List[Any] = trainer.callback_handler.callbacks[0]
trainer.remove_callback(lowercase_ )
expected_callbacks.remove(lowercase_ )
self.check_callbacks_equality(trainer.callback_handler.callbacks , lowercase_ )
snake_case_ : List[Any] = self.get_trainer()
snake_case_ : Optional[int] = trainer.callback_handler.callbacks[0]
snake_case_ : Optional[Any] = trainer.pop_callback(lowercase_ )
self.assertEqual(lowercase_ , lowercase_ )
self.check_callbacks_equality(trainer.callback_handler.callbacks , lowercase_ )
trainer.add_callback(lowercase_ )
expected_callbacks.insert(0 , lowercase_ )
self.check_callbacks_equality(trainer.callback_handler.callbacks , lowercase_ )
def _snake_case ( self : List[Any] ):
import warnings
# XXX: for now ignore scatter_gather warnings in this test since it's not relevant to what's being tested
warnings.simplefilter(action='''ignore''' , category=lowercase_ )
snake_case_ : int = self.get_trainer(callbacks=[MyTestTrainerCallback] )
trainer.train()
snake_case_ : Union[str, Any] = trainer.callback_handler.callbacks[-2].events
self.assertEqual(lowercase_ , self.get_expected_events(lowercase_ ) )
# Independent log/save/eval
snake_case_ : int = self.get_trainer(callbacks=[MyTestTrainerCallback] , logging_steps=5 )
trainer.train()
snake_case_ : str = trainer.callback_handler.callbacks[-2].events
self.assertEqual(lowercase_ , self.get_expected_events(lowercase_ ) )
snake_case_ : List[Any] = self.get_trainer(callbacks=[MyTestTrainerCallback] , save_steps=5 )
trainer.train()
snake_case_ : int = trainer.callback_handler.callbacks[-2].events
self.assertEqual(lowercase_ , self.get_expected_events(lowercase_ ) )
snake_case_ : List[Any] = self.get_trainer(callbacks=[MyTestTrainerCallback] , eval_steps=5 , evaluation_strategy='''steps''' )
trainer.train()
snake_case_ : Union[str, Any] = trainer.callback_handler.callbacks[-2].events
self.assertEqual(lowercase_ , self.get_expected_events(lowercase_ ) )
snake_case_ : Union[str, Any] = self.get_trainer(callbacks=[MyTestTrainerCallback] , evaluation_strategy='''epoch''' )
trainer.train()
snake_case_ : Dict = trainer.callback_handler.callbacks[-2].events
self.assertEqual(lowercase_ , self.get_expected_events(lowercase_ ) )
# A bit of everything
snake_case_ : str = self.get_trainer(
callbacks=[MyTestTrainerCallback] , logging_steps=3 , save_steps=10 , eval_steps=5 , evaluation_strategy='''steps''' , )
trainer.train()
snake_case_ : str = trainer.callback_handler.callbacks[-2].events
self.assertEqual(lowercase_ , self.get_expected_events(lowercase_ ) )
# warning should be emitted for duplicated callbacks
with patch('''transformers.trainer_callback.logger.warning''' ) as warn_mock:
snake_case_ : Dict = self.get_trainer(
callbacks=[MyTestTrainerCallback, MyTestTrainerCallback] , )
assert str(lowercase_ ) in warn_mock.call_args[0][0]
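# Minimal sketch of the pattern these tests exercise (hypothetical helper, not
# part of the file): subclass TrainerCallback and override on_* hooks, which
# Trainer fires with (args, state, control, **kwargs).
#   from transformers import TrainerCallback
#   class PrintEveryTenSteps(TrainerCallback):
#       def on_step_end(self, args, state, control, **kwargs):
#           if state.global_step % 10 == 0:
#               print(f"step {state.global_step}")
#   trainer = Trainer(model, args, callbacks=[PrintEveryTenSteps()])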
| 264 | 0 |
'''simple docstring'''
from __future__ import annotations
from collections.abc import Sequence
from typing import Literal
def lowerCamelCase (_SCREAMING_SNAKE_CASE : str , _SCREAMING_SNAKE_CASE : str ):
__a : List[str] = list(_SCREAMING_SNAKE_CASE )
__a : List[Any] = list(_SCREAMING_SNAKE_CASE )
__a : Tuple = 0
for i in range(len(_SCREAMING_SNAKE_CASE ) ):
if lista[i] != lista[i]:
count += 1
__a : Any = '_'
if count > 1:
return False
else:
return "".join(_SCREAMING_SNAKE_CASE )
def lowerCamelCase (_SCREAMING_SNAKE_CASE : list[str] ):
__a : Any = []
while True:
__a : Union[str, Any] = ['$'] * len(_SCREAMING_SNAKE_CASE )
__a : Tuple = []
for i in range(len(_SCREAMING_SNAKE_CASE ) ):
for j in range(i + 1 , len(_SCREAMING_SNAKE_CASE ) ):
__a : List[Any] = compare_string(binary[i] , binary[j] )
if k is False:
__a : Dict = '*'
__a : Optional[Any] = '*'
temp.append('X' )
for i in range(len(_SCREAMING_SNAKE_CASE ) ):
if checka[i] == "$":
pi.append(binary[i] )
if len(_SCREAMING_SNAKE_CASE ) == 0:
return pi
__a : Union[str, Any] = list(set(_SCREAMING_SNAKE_CASE ) )
def lowerCamelCase (_SCREAMING_SNAKE_CASE : int , _SCREAMING_SNAKE_CASE : Sequence[float] ):
__a : Optional[Any] = []
for minterm in minterms:
__a : List[Any] = ''
for _ in range(_SCREAMING_SNAKE_CASE ):
__a : List[str] = str(minterm % 2 ) + string
minterm //= 2
temp.append(_SCREAMING_SNAKE_CASE )
return temp
def lowerCamelCase (_SCREAMING_SNAKE_CASE : str , _SCREAMING_SNAKE_CASE : str , _SCREAMING_SNAKE_CASE : int ):
__a : List[str] = list(_SCREAMING_SNAKE_CASE )
__a : Tuple = list(_SCREAMING_SNAKE_CASE )
__a : Tuple = 0
for i in range(len(_SCREAMING_SNAKE_CASE ) ):
if lista[i] != lista[i]:
count_n += 1
return count_n == count
def lowerCamelCase (_SCREAMING_SNAKE_CASE : list[list[int]] , _SCREAMING_SNAKE_CASE : list[str] ):
__a : int = []
__a : str = [0] * len(_SCREAMING_SNAKE_CASE )
for i in range(len(chart[0] ) ):
__a : Any = 0
__a : Union[str, Any] = -1
for j in range(len(_SCREAMING_SNAKE_CASE ) ):
if chart[j][i] == 1:
count += 1
__a : Optional[int] = j
if count == 1:
__a : Optional[Any] = 1
for i in range(len(_SCREAMING_SNAKE_CASE ) ):
if select[i] == 1:
for j in range(len(chart[0] ) ):
if chart[i][j] == 1:
for k in range(len(_SCREAMING_SNAKE_CASE ) ):
__a : List[Any] = 0
temp.append(prime_implicants[i] )
while True:
__a : Any = 0
__a : Any = -1
__a : int = 0
for i in range(len(_SCREAMING_SNAKE_CASE ) ):
__a : Any = chart[i].count(1 )
if count_n > max_n:
__a : str = count_n
__a : Any = i
if max_n == 0:
return temp
temp.append(prime_implicants[rem] )
for i in range(len(chart[0] ) ):
if chart[rem][i] == 1:
for j in range(len(_SCREAMING_SNAKE_CASE ) ):
__a : List[str] = 0
def lowerCamelCase (_SCREAMING_SNAKE_CASE : list[str] , _SCREAMING_SNAKE_CASE : list[str] ):
__a : int = [[0 for x in range(len(_SCREAMING_SNAKE_CASE ) )] for x in range(len(_SCREAMING_SNAKE_CASE ) )]
for i in range(len(_SCREAMING_SNAKE_CASE ) ):
__a : Union[str, Any] = prime_implicants[i].count('_' )
for j in range(len(_SCREAMING_SNAKE_CASE ) ):
if is_for_table(prime_implicants[i] , binary[j] , _SCREAMING_SNAKE_CASE ):
__a : int = 1
return chart
def lowerCamelCase ():
__a : Any = int(input('Enter the no. of variables\n' ) )
__a : Union[str, Any] = [
float(_SCREAMING_SNAKE_CASE )
for x in input(
'Enter the decimal representation of Minterms \'Spaces Separated\'\n' ).split()
]
__a : List[str] = decimal_to_binary(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
__a : Any = check(_SCREAMING_SNAKE_CASE )
print('Prime Implicants are:' )
print(_SCREAMING_SNAKE_CASE )
__a : Optional[int] = prime_implicant_chart(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
__a : List[str] = selection(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
print('Essential Prime Implicants are:' )
print(_SCREAMING_SNAKE_CASE )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
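# Worked example of the Quine-McCluskey flow above, for 3 variables and
# minterms {1, 5, 7} (binary 001, 101, 111, most-significant bit first):
# 001 & 101 merge to _01 and 101 & 111 merge to 1_1; no further merges are
# possible, so the prime implicants are _01 (B'C) and 1_1 (AC). _01 alone
# covers minterm 1 and 1_1 alone covers minterm 7, so both are essential:
# f = B'C + AC.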
| 27 |
"""simple docstring"""
import numpy as np
def tangent_hyperbolic(vector: np.ndarray) -> np.ndarray:
    return (2 / (1 + np.exp(-2 * vector))) - 1
if __name__ == "__main__":
import doctest
doctest.testmod()
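    # Small added sanity check of the identity tanh(x) = 2*sigmoid(2x) - 1
    # implemented above (not part of the original file):
    x = np.array([0.0, 1.0, -0.5])
    np.testing.assert_allclose(tangent_hyperbolic(x), np.tanh(x))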
| 264 | 0 |
'''simple docstring'''
from collections.abc import Iterable
from typing import Generic, TypeVar
_T = TypeVar("_T")
class QueueByTwoStacks(Generic[_T]):
    """simple docstring"""
    def __init__(self, iterable: Iterable[_T] | None = None) -> None:
        """simple docstring"""
        self._stack1: list[_T] = list(iterable or [])
        self._stack2: list[_T] = []
    def __len__(self) -> int:
        """simple docstring"""
        return len(self._stack1) + len(self._stack2)
    def __repr__(self) -> str:
        """simple docstring"""
        return f"""Queue({tuple(self._stack2[::-1] + self._stack1)})"""
    def put(self, item: _T) -> None:
        """simple docstring"""
        self._stack1.append(item)
    def get(self) -> _T:
        """simple docstring"""
        stack1_pop = self._stack1.pop
        stack2_append = self._stack2.append
        if not self._stack2:
            while self._stack1:
                stack2_append(stack1_pop())
        if not self._stack2:
            raise IndexError('Queue is empty')
        return self._stack2.pop()
if __name__ == "__main__":
from doctest import testmod
testmod()
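    # Usage sketch (added; not in the original file): FIFO order is preserved,
    # and items migrate from _stack1 to _stack2 only when _stack2 is empty,
    # which makes get() amortized O(1).
    q = QueueByTwoStacks([1, 2, 3])
    q.put(4)
    assert q.get() == 1  # first in, first out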
| 28 |
"""simple docstring"""
import numpy as np
import torch
from torch.utils.data import Dataset
from utils import logger
class _UpperCAmelCase ( lowerCAmelCase__):
def __init__( self : Optional[int] , lowercase_ : str , lowercase_ : int ):
snake_case_ : Dict = params
snake_case_ : Union[str, Any] = np.array(lowercase_ )
snake_case_ : str = np.array([len(lowercase_ ) for t in data] )
self.check()
self.remove_long_sequences()
self.remove_empty_sequences()
self.remove_unknown_sequences()
self.check()
self.print_statistics()
def __getitem__( self : Dict , lowercase_ : Union[str, Any] ):
return (self.token_ids[index], self.lengths[index])
def __len__( self : List[Any] ):
return len(self.lengths )
def _snake_case ( self : Tuple ):
assert len(self.token_ids ) == len(self.lengths )
assert all(self.lengths[i] == len(self.token_ids[i] ) for i in range(len(self.lengths ) ) )
    def remove_long_sequences( self ):
        max_len = self.params.max_model_input_size
        idx = self.lengths > max_len
        logger.info(f"Splitting {sum(idx )} too long sequences." )
        def divide_chunks(l , n ):
            return [l[i : i + n] for i in range(0 , len(l ) , n )]
        new_tok_ids = []
        new_lengths = []
        if self.params.mlm:
            cls_id, sep_id = self.params.special_tok_ids['''cls_token'''], self.params.special_tok_ids['''sep_token''']
        else:
            cls_id, sep_id = self.params.special_tok_ids['''bos_token'''], self.params.special_tok_ids['''eos_token''']
        for seq_, len_ in zip(self.token_ids , self.lengths ):
            assert (seq_[0] == cls_id) and (seq_[-1] == sep_id), seq_
            if len_ <= max_len:
                new_tok_ids.append(seq_ )
                new_lengths.append(len_ )
            else:
                sub_seqs = []
                for sub_s in divide_chunks(seq_ , max_len - 2 ):
                    if sub_s[0] != cls_id:
                        sub_s = np.insert(sub_s , 0 , cls_id )
                    if sub_s[-1] != sep_id:
                        sub_s = np.insert(sub_s , len(sub_s ) , sep_id )
                    assert len(sub_s ) <= max_len
                    assert (sub_s[0] == cls_id) and (sub_s[-1] == sep_id), sub_s
                    sub_seqs.append(sub_s )
                new_tok_ids.extend(sub_seqs )
                new_lengths.extend([len(l ) for l in sub_seqs] )
        self.token_ids = np.array(new_tok_ids )
        self.lengths = np.array(new_lengths )
    def remove_empty_sequences( self ):
        init_size = len(self )
        indices = self.lengths > 11
        self.token_ids = self.token_ids[indices]
        self.lengths = self.lengths[indices]
        new_size = len(self )
        logger.info(f"Remove {init_size - new_size} too short (<=11 tokens) sequences." )
    def remove_unknown_sequences( self ):
        if "unk_token" not in self.params.special_tok_ids:
            return
        else:
            unk_token_id = self.params.special_tok_ids['''unk_token''']
        init_size = len(self )
        unk_occs = np.array([np.count_nonzero(a == unk_token_id ) for a in self.token_ids] )
        indices = (unk_occs / self.lengths) < 0.5
        self.token_ids = self.token_ids[indices]
        self.lengths = self.lengths[indices]
        new_size = len(self )
        logger.info(f"Remove {init_size - new_size} sequences with a high level of unknown tokens (50%)." )
    def print_statistics( self ):
if not self.params.is_master:
return
logger.info(f"{len(self )} sequences" )
# data_len = sum(self.lengths)
# nb_unique_tokens = len(Counter(list(chain(*self.token_ids))))
# logger.info(f'{data_len} tokens ({nb_unique_tokens} unique)')
# unk_idx = self.params.special_tok_ids['unk_token']
# nb_unknown = sum([(t==unk_idx).sum() for t in self.token_ids])
# logger.info(f'{nb_unknown} unknown tokens (covering {100*nb_unknown/data_len:.2f}% of the data)')
    def batch_sequences( self , batch ):
        token_ids = [t[0] for t in batch]
        lengths = [t[1] for t in batch]
        assert len(token_ids ) == len(lengths )
        # Max for paddings
        max_seq_len_ = max(lengths )
        # Pad token ids
        if self.params.mlm:
            pad_idx = self.params.special_tok_ids['''pad_token''']
        else:
            pad_idx = self.params.special_tok_ids['''unk_token''']
        tk_ = [list(t.astype(int ) ) + [pad_idx] * (max_seq_len_ - len(t )) for t in token_ids]
        assert len(tk_ ) == len(token_ids )
        assert all(len(t ) == max_seq_len_ for t in tk_ )
        tk_t = torch.tensor(tk_ ) # (bs, max_seq_len_)
        lg_t = torch.tensor(lengths ) # (bs)
        return tk_t, lg_t
| 264 | 0 |
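The collate logic at the end of the class above pads every sequence in a batch up to the batch maximum. A standalone sketch of the same idea, with hypothetical names, detached from the class:
import torch
def pad_batch(token_ids, pad_idx):
    # Pad each sequence with pad_idx up to the longest length in the batch.
    lengths = [len(t) for t in token_ids]
    max_len = max(lengths)
    padded = [list(t) + [pad_idx] * (max_len - len(t)) for t in token_ids]
    return torch.tensor(padded), torch.tensor(lengths)  # (bs, max_len), (bs,)
tk, lg = pad_batch([[5, 6, 7], [8, 9]], pad_idx=0)
print(tk)  # tensor([[5, 6, 7], [8, 9, 0]])
print(lg)  # tensor([3, 2])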
import glob
import os
import random
from string import ascii_lowercase, digits
import cva
import numpy as np
# Parameters
OUTPUT_SIZE = (720, 1280) # Height, Width
SCALE_RANGE = (0.4, 0.6) # if height or width lower than this scale, drop it.
FILTER_TINY_SCALE = 1 / 100
LABEL_DIR = ''
IMG_DIR = ''
OUTPUT_DIR = ''
NUMBER_IMAGES = 250
def main ():
    '''simple docstring'''
    img_paths , annos = get_dataset(LABEL_DIR , IMG_DIR )
    for index in range(NUMBER_IMAGES ):
        idxs = random.sample(range(len(annos ) ) , 4 )
        new_image , new_annos , path = update_image_and_anno(
            img_paths , annos , idxs , OUTPUT_SIZE , SCALE_RANGE , filter_scale=FILTER_TINY_SCALE , )
        # Get random string code: '7b7ad245cdff75241935e4dd860f3bad'
        letter_code = random_chars(32 )
        file_name = path.split(os.sep )[-1].rsplit('.' , 1 )[0]
        file_root = F"{OUTPUT_DIR}/{file_name}_MOSAIC_{letter_code}"
        cva.imwrite(F"{file_root}.jpg" , new_image , [cva.IMWRITE_JPEG_QUALITY, 85] )
        print(F"Succeeded {index+1}/{NUMBER_IMAGES} with {file_name}" )
        annos_list = []
        for anno in new_annos:
            width = anno[3] - anno[1]
            height = anno[4] - anno[2]
            x_center = anno[1] + width / 2
            y_center = anno[2] + height / 2
            obj = F"{anno[0]} {x_center} {y_center} {width} {height}"
            annos_list.append(obj )
        with open(F"{file_root}.txt" , 'w' ) as outfile:
            outfile.write('\n'.join(line for line in annos_list ) )
def get_dataset ( label_dir : str , img_dir : str ):
    '''simple docstring'''
    img_paths = []
    labels = []
    for label_file in glob.glob(os.path.join(label_dir , '*.txt' ) ):
        label_name = label_file.split(os.sep )[-1].rsplit('.' , 1 )[0]
        with open(label_file ) as in_file:
            obj_lists = in_file.readlines()
        img_path = os.path.join(img_dir , F"{label_name}.jpg" )
        boxes = []
        for obj_list in obj_lists:
            obj = obj_list.rstrip('\n' ).split(' ' )
            xmin = float(obj[1] ) - float(obj[3] ) / 2
            ymin = float(obj[2] ) - float(obj[4] ) / 2
            xmax = float(obj[1] ) + float(obj[3] ) / 2
            ymax = float(obj[2] ) + float(obj[4] ) / 2
            boxes.append([int(obj[0] ), xmin, ymin, xmax, ymax] )
        if not boxes:
            continue
        img_paths.append(img_path )
        labels.append(boxes )
    return img_paths, labels
def update_image_and_anno ( all_img_list : list , all_annos : list , idxs : list[int] , output_size : tuple[int, int] , scale_range : tuple[float, float] , filter_scale : float = 0.0 , ):
    '''simple docstring'''
    output_img = np.zeros([output_size[0], output_size[1], 3] , dtype=np.uint8 )
    scale_x = scale_range[0] + random.random() * (scale_range[1] - scale_range[0])
    scale_y = scale_range[0] + random.random() * (scale_range[1] - scale_range[0])
    divid_point_x = int(scale_x * output_size[1] )
    divid_point_y = int(scale_y * output_size[0] )
    new_anno = []
    path_list = []
    for i, index in enumerate(idxs ):
        path = all_img_list[index]
        path_list.append(path )
        img_annos = all_annos[index]
        img = cva.imread(path )
        if i == 0: # top-left
            img = cva.resize(img , (divid_point_x, divid_point_y) )
            output_img[:divid_point_y, :divid_point_x, :] = img
            for bbox in img_annos:
                xmin = bbox[1] * scale_x
                ymin = bbox[2] * scale_y
                xmax = bbox[3] * scale_x
                ymax = bbox[4] * scale_y
                new_anno.append([bbox[0], xmin, ymin, xmax, ymax] )
        elif i == 1: # top-right
            img = cva.resize(img , (output_size[1] - divid_point_x, divid_point_y) )
            output_img[:divid_point_y, divid_point_x : output_size[1], :] = img
            for bbox in img_annos:
                xmin = scale_x + bbox[1] * (1 - scale_x)
                ymin = bbox[2] * scale_y
                xmax = scale_x + bbox[3] * (1 - scale_x)
                ymax = bbox[4] * scale_y
                new_anno.append([bbox[0], xmin, ymin, xmax, ymax] )
        elif i == 2: # bottom-left
            img = cva.resize(img , (divid_point_x, output_size[0] - divid_point_y) )
            output_img[divid_point_y : output_size[0], :divid_point_x, :] = img
            for bbox in img_annos:
                xmin = bbox[1] * scale_x
                ymin = scale_y + bbox[2] * (1 - scale_y)
                xmax = bbox[3] * scale_x
                ymax = scale_y + bbox[4] * (1 - scale_y)
                new_anno.append([bbox[0], xmin, ymin, xmax, ymax] )
        else: # bottom-right
            img = cva.resize(
                img , (output_size[1] - divid_point_x, output_size[0] - divid_point_y) )
            output_img[divid_point_y : output_size[0], divid_point_x : output_size[1], :] = img
            for bbox in img_annos:
                xmin = scale_x + bbox[1] * (1 - scale_x)
                ymin = scale_y + bbox[2] * (1 - scale_y)
                xmax = scale_x + bbox[3] * (1 - scale_x)
                ymax = scale_y + bbox[4] * (1 - scale_y)
                new_anno.append([bbox[0], xmin, ymin, xmax, ymax] )
    # Remove bounding box small than scale of filter
    if filter_scale > 0:
        new_anno = [
            anno
            for anno in new_anno
            if filter_scale < (anno[3] - anno[1]) and filter_scale < (anno[4] - anno[2])
        ]
    return output_img, new_anno, path_list[0]
def random_chars ( number_char : int ):
    '''simple docstring'''
    assert number_char > 1, "The number of characters should be greater than 1"
    letter_code = ascii_lowercase + digits
    return "".join(random.choice(letter_code ) for _ in range(number_char ) )
if __name__ == "__main__":
main()
print('DONE ✅')
| 29 |
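The script above converts between YOLO's normalized (class, x_center, y_center, w, h) labels and corner coordinates twice: once when reading label files, once when writing them back. A minimal round-trip sketch of just that conversion, with hypothetical helper names:
import math
def yolo_to_corners(box):
    # (cls, xc, yc, w, h) -> (cls, xmin, ymin, xmax, ymax)
    cls, xc, yc, w, h = box
    return (cls, xc - w / 2, yc - h / 2, xc + w / 2, yc + h / 2)
def corners_to_yolo(box):
    # (cls, xmin, ymin, xmax, ymax) -> (cls, xc, yc, w, h)
    cls, xmin, ymin, xmax, ymax = box
    w, h = xmax - xmin, ymax - ymin
    return (cls, xmin + w / 2, ymin + h / 2, w, h)
box = (0, 0.5, 0.5, 0.2, 0.4)
round_trip = corners_to_yolo(yolo_to_corners(box))
assert all(math.isclose(a, b) for a, b in zip(box, round_trip))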
"""simple docstring"""
from sympy import diff, lambdify, symbols
from sympy.functions import * # noqa: F403
def newton_raphson ( function , starting_point , variable = "x" , precision = 10**-10 , multiplicity = 1 , ):
    x = symbols(variable )
    func = lambdify(x , function )
    diff_function = lambdify(x , diff(function , x ) )
    prev_guess = starting_point
    while True:
        if diff_function(prev_guess ) != 0:
            next_guess = prev_guess - multiplicity * func(prev_guess ) / diff_function(
                prev_guess )
        else:
            raise ZeroDivisionError('''Could not find root''' ) from None
        # Precision is checked by comparing the difference of consecutive guesses
        if abs(next_guess - prev_guess ) < precision:
            return next_guess
        prev_guess = next_guess
# Let's Execute
if __name__ == "__main__":
# Find root of trigonometric function
# Find value of pi
print(f'The root of sin(x) = 0 is {newton_raphson("sin(x)", 2)}')
# Find root of polynomial
# Find fourth Root of 5
print(f'The root of x**4 - 5 = 0 is {newton_raphson("x**4 -5", 0.4 +5j)}')
# Find value of e
print(
'''The root of log(y) - 1 = 0 is ''',
f'{newton_raphson("log(y) - 1", 2, variable="y")}',
)
# Exponential Roots
print(
'''The root of exp(x) - 1 = 0 is''',
f'{newton_raphson("exp(x) - 1", 10, precision=0.005)}',
)
# Find root of cos(x)
print(f'The root of cos(x) = 0 is {newton_raphson("cos(x)", 0)}')
| 264 | 0 |
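The sympy-based routine above generalizes the plain Newton iteration x_{n+1} = x_n - f(x_n)/f'(x_n). When the derivative is known in closed form, no symbolic machinery is needed; a minimal float-only sketch solving x**2 - a = 0:
def newton_sqrt(a, guess=1.0, precision=1e-10):
    # f(x) = x**2 - a, f'(x) = 2x  =>  update is x - (x**2 - a) / (2 * x)
    x = guess
    while True:
        nxt = x - (x * x - a) / (2 * x)
        if abs(nxt - x) < precision:
            return nxt
        x = nxt
print(newton_sqrt(2.0))  # 1.4142135623730951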
from datetime import datetime
import matplotlib.pyplot as plt
import torch
def freeze_module ( module ):
    '''simple docstring'''
    for param in module.parameters():
        param.requires_grad = False
def get_device ( ):
    '''simple docstring'''
    device = '''cuda''' if torch.cuda.is_available() else '''cpu'''
    if torch.backends.mps.is_available() and torch.backends.mps.is_built():
        device = '''mps'''
    if device == "mps":
        print(
            '''WARNING: MPS currently doesn\'t seem to work, and messes up backpropagation without any visible torch'''
            ''' errors. I recommend using CUDA on a colab notebook or CPU instead if you\'re facing inexplicable issues'''
            ''' with generations.''' )
    return device
def show_pil ( img ):
    '''simple docstring'''
    fig = plt.imshow(img )
    fig.axes.get_xaxis().set_visible(False )
    fig.axes.get_yaxis().set_visible(False )
    plt.show()
def get_timestamp ( ):
    '''simple docstring'''
    current_time = datetime.now()
    timestamp = current_time.strftime('''%H:%M:%S''' )
    return timestamp
| 30 |
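Typical use of the freezing helper above: freeze a pretrained backbone so the optimizer only updates the new head. A minimal sketch with a stand-in model (the module here is an illustration, not from the original file):
import torch
from torch import nn
model = nn.Sequential(nn.Linear(8, 8), nn.Linear(8, 2))
for param in model[0].parameters():  # same effect as freeze_module(model[0])
    param.requires_grad = False
trainable = [p for p in model.parameters() if p.requires_grad]
optimizer = torch.optim.Adam(trainable, lr=1e-3)  # only the head gets updated
print(sum(p.numel() for p in trainable))  # 18 = 8*2 weights + 2 biases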
"""simple docstring"""
from __future__ import annotations
def shear_stress ( stress , tangential_force , area , ):
if (stress, tangential_force, area).count(0 ) != 1:
raise ValueError('''You cannot supply more or less than 2 values''' )
elif stress < 0:
raise ValueError('''Stress cannot be negative''' )
elif tangential_force < 0:
raise ValueError('''Tangential Force cannot be negative''' )
elif area < 0:
raise ValueError('''Area cannot be negative''' )
elif stress == 0:
return (
"stress",
tangential_force / area,
)
elif tangential_force == 0:
return (
"tangential_force",
stress * area,
)
else:
return (
"area",
tangential_force / stress,
)
if __name__ == "__main__":
import doctest
doctest.testmod()
| 264 | 0 |
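A worked check of the relation tau = F / A that the function above encodes; passing 0 for exactly one quantity asks for that quantity (this assumes the repaired shear_stress name):
print(shear_stress(stress=25, tangential_force=100, area=0))      # ('area', 4.0)
print(shear_stress(stress=0, tangential_force=1600, area=200))    # ('stress', 8.0)
print(shear_stress(stress=1000, tangential_force=0, area=1200))   # ('tangential_force', 1200000)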
'''simple docstring'''
import argparse
import os
import pickle
import sys
import torch
from transformers import TransfoXLConfig, TransfoXLLMHeadModel, load_tf_weights_in_transfo_xl
from transformers.models.transfo_xl import tokenization_transfo_xl as data_utils
from transformers.models.transfo_xl.tokenization_transfo_xl import CORPUS_NAME, VOCAB_FILES_NAMES
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging
logging.set_verbosity_info()
# We do this to be able to load python 2 datasets pickles
# See e.g. https://stackoverflow.com/questions/2121874/python-pickling-after-changing-a-modules-directory/2121918#2121918
data_utils.Vocab = data_utils.TransfoXLTokenizer
data_utils.Corpus = data_utils.TransfoXLCorpus
sys.modules["data_utils"] = data_utils
sys.modules["vocabulary"] = data_utils
def convert_transfo_xl_checkpoint_to_pytorch ( tf_checkpoint_path , transfo_xl_config_file , pytorch_dump_folder_path , transfo_xl_dataset_file ):
    """simple docstring"""
    if transfo_xl_dataset_file:
        # Convert a pre-processed corpus (see original TensorFlow repo)
        with open(transfo_xl_dataset_file , "rb" ) as fp:
            corpus = pickle.load(fp , encoding="latin1" )
        # Save vocabulary and dataset cache as Dictionaries (should be better than pickles for the long-term)
        pytorch_vocab_dump_path = pytorch_dump_folder_path + "/" + VOCAB_FILES_NAMES["pretrained_vocab_file"]
        print(F"""Save vocabulary to {pytorch_vocab_dump_path}""" )
        corpus_vocab_dict = corpus.vocab.__dict__
        torch.save(corpus_vocab_dict , pytorch_vocab_dump_path )
        corpus_dict_no_vocab = corpus.__dict__
        corpus_dict_no_vocab.pop("vocab" , None )
        pytorch_dataset_dump_path = pytorch_dump_folder_path + "/" + CORPUS_NAME
        print(F"""Save dataset to {pytorch_dataset_dump_path}""" )
        torch.save(corpus_dict_no_vocab , pytorch_dataset_dump_path )
    if tf_checkpoint_path:
        # Convert a pre-trained TensorFlow model
        config_path = os.path.abspath(transfo_xl_config_file )
        tf_path = os.path.abspath(tf_checkpoint_path )
        print(F"""Converting Transformer XL checkpoint from {tf_path} with config at {config_path}.""" )
        # Initialise PyTorch model
        if transfo_xl_config_file == "":
            config = TransfoXLConfig()
        else:
            config = TransfoXLConfig.from_json_file(transfo_xl_config_file )
        print(F"""Building PyTorch model from configuration: {config}""" )
        model = TransfoXLLMHeadModel(config )
        model = load_tf_weights_in_transfo_xl(model , config , tf_path )
        # Save pytorch-model
        pytorch_weights_dump_path = os.path.join(pytorch_dump_folder_path , WEIGHTS_NAME )
        pytorch_config_dump_path = os.path.join(pytorch_dump_folder_path , CONFIG_NAME )
        print(F"""Save PyTorch model to {os.path.abspath(pytorch_weights_dump_path )}""" )
        torch.save(model.state_dict() , pytorch_weights_dump_path )
        print(F"""Save configuration file to {os.path.abspath(pytorch_config_dump_path )}""" )
        with open(pytorch_config_dump_path , "w" , encoding="utf-8" ) as f:
            f.write(config.to_json_string() )
if __name__ == "__main__":
__SCREAMING_SNAKE_CASE : Union[str, Any] = argparse.ArgumentParser()
parser.add_argument(
"""--pytorch_dump_folder_path""",
default=None,
type=str,
required=True,
help="""Path to the folder to store the PyTorch model or dataset/vocab.""",
)
parser.add_argument(
"""--tf_checkpoint_path""",
default="""""",
type=str,
help="""An optional path to a TensorFlow checkpoint path to be converted.""",
)
parser.add_argument(
"""--transfo_xl_config_file""",
default="""""",
type=str,
help=(
"""An optional config json file corresponding to the pre-trained BERT model. \n"""
"""This specifies the model architecture."""
),
)
parser.add_argument(
"""--transfo_xl_dataset_file""",
default="""""",
type=str,
help="""An optional dataset file to be converted in a vocabulary.""",
)
__SCREAMING_SNAKE_CASE : Union[str, Any] = parser.parse_args()
convert_transfo_xl_checkpoint_to_pytorch(
args.tf_checkpoint_path,
args.transfo_xl_config_file,
args.pytorch_dump_folder_path,
args.transfo_xl_dataset_file,
)
| 31 |
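The sys.modules aliasing near the top of the script above exists so that pickles referencing the old module paths resolve against the current classes. A minimal sketch of the same trick with hypothetical names:
import sys
import types
shim = types.ModuleType("old_module")  # pretend a pickle was written against "old_module"
class Payload:
    pass
shim.Payload = Payload
sys.modules["old_module"] = shim
# unpickling an object stored as old_module.Payload now finds this class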
"""simple docstring"""
from functools import lru_cache
@lru_cache
def factorial ( num ):
    if num < 0:
        raise ValueError('''Number should not be negative.''' )
    return 1 if num in (0, 1) else num * factorial(num - 1 )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 264 | 0 |
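Because of @lru_cache, every intermediate value the recursion above touches is memoized, so repeated calls become cache lookups. A small usage sketch, assuming the repaired factorial name:
print(factorial(5))            # 120, fills the cache for 0..5
print(factorial(6))            # 720, one multiplication on top of the cached factorial(5)
print(factorial.cache_info())  # hit/miss counters exposed by functools.lru_cache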
import math
import unittest
def is_prime ( number : int ) -> bool:
    """simple docstring"""
    assert isinstance(number , int ) and (
        number >= 0
    ), "'number' must been an int and positive"
if 1 < number < 4:
# 2 and 3 are primes
return True
elif number < 2 or number % 2 == 0 or number % 3 == 0:
# Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
return False
    # All primes greater than 3 are of the form 6k +/- 1
    for i in range(5 , int(math.sqrt(number ) + 1 ) , 6 ):
if number % i == 0 or number % (i + 2) == 0:
return False
return True
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
    def test_primes( self ) -> None:
self.assertTrue(is_prime(2 ) )
self.assertTrue(is_prime(3 ) )
self.assertTrue(is_prime(5 ) )
self.assertTrue(is_prime(7 ) )
self.assertTrue(is_prime(1_1 ) )
self.assertTrue(is_prime(1_3 ) )
self.assertTrue(is_prime(1_7 ) )
self.assertTrue(is_prime(1_9 ) )
self.assertTrue(is_prime(2_3 ) )
self.assertTrue(is_prime(2_9 ) )
    def test_not_primes( self ) -> None:
        with self.assertRaises(AssertionError ):
is_prime(-1_9 )
self.assertFalse(
is_prime(0 ) , 'Zero doesn\'t have any positive factors, primes must have exactly two.' , )
self.assertFalse(
is_prime(1 ) , 'One only has 1 positive factor, primes must have exactly two.' , )
self.assertFalse(is_prime(2 * 2 ) )
self.assertFalse(is_prime(2 * 3 ) )
self.assertFalse(is_prime(3 * 3 ) )
self.assertFalse(is_prime(3 * 5 ) )
self.assertFalse(is_prime(3 * 5 * 7 ) )
if __name__ == "__main__":
unittest.main()
| 32 |
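The trial division above tests 2 and 3 explicitly and then only candidates of the form 6k +/- 1 up to sqrt(n), since every prime greater than 3 is congruent to 1 or 5 mod 6. A small sketch using it to list primes, assuming the repaired is_prime name:
primes_below_50 = [n for n in range(50) if is_prime(n)]
print(primes_below_50)  # [2, 3, 5, 7, 11, 13, 17, 19, 23, 29, 31, 37, 41, 43, 47]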
"""simple docstring"""
import sys
N = (
'''73167176531330624919225119674426574742355349194934'''
'''96983520312774506326239578318016984801869478851843'''
'''85861560789112949495459501737958331952853208805511'''
'''12540698747158523863050715693290963295227443043557'''
'''66896648950445244523161731856403098711121722383113'''
'''62229893423380308135336276614282806444486645238749'''
'''30358907296290491560440772390713810515859307960866'''
'''70172427121883998797908792274921901699720888093776'''
'''65727333001053367881220235421809751254540594752243'''
'''52584907711670556013604839586446706324415722155397'''
'''53697817977846174064955149290862569321978468622482'''
'''83972241375657056057490261407972968652414535100474'''
'''82166370484403199890008895243450658541227588666881'''
'''16427171479924442928230863465674813919123162824586'''
'''17866458359124566529476545682848912883142607690042'''
'''24219022671055626321111109370544217506941658960408'''
'''07198403850962455444362981230987879927244284909188'''
'''84580156166097919133875499200524063689912560717606'''
'''05886116467109405077541002256983155200055935729725'''
'''71636269561882670428252483600823257530420752963450'''
)
def str_eval ( s ):
    product = 1
    for digit in s:
        product *= int(digit )
    return product
def solution ( n = N ):
    largest_product = -sys.maxsize - 1
    substr = n[:13]
    cur_index = 13
    while cur_index < len(n ) - 13:
        if int(n[cur_index] ) >= int(substr[0] ):
            substr = substr[1:] + n[cur_index]
            cur_index += 1
        else:
            largest_product = max(largest_product , str_eval(substr ) )
            substr = n[cur_index : cur_index + 13]
            cur_index += 13
    return largest_product
if __name__ == "__main__":
print(f'{solution() = }')
| 264 | 0 |
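A brute-force baseline to cross-check the skip-ahead window logic above: score every 13-digit substring directly. On the 1000-digit constant N it returns 23514624000, the accepted Project Euler answer.
def solution_brute_force(n=N, width=13):
    best = 0
    for i in range(len(n) - width + 1):
        product = 1
        for digit in n[i : i + width]:
            product *= int(digit)
        best = max(best, product)
    return best
print(solution_brute_force())  # 23514624000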
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
__A : List[Any] = logging.get_logger(__name__)
__A : List[Any] = {
'''microsoft/resnet-50''': '''https://huggingface.co/microsoft/resnet-50/blob/main/config.json''',
}
class ResNetConfig ( BackboneConfigMixin , PretrainedConfig ):
    model_type = "resnet"
    layer_types = ["basic", "bottleneck"]
    def __init__( self , num_channels=3 , embedding_size=64 , hidden_sizes=[2_56, 5_12, 10_24, 20_48] , depths=[3, 4, 6, 3] , layer_type="bottleneck" , hidden_act="relu" , downsample_in_first_stage=False , out_features=None , out_indices=None , **kwargs , ):
        super().__init__(**kwargs )
        if layer_type not in self.layer_types:
            raise ValueError(F'''layer_type={layer_type} is not one of {','.join(self.layer_types )}''' )
        self.num_channels = num_channels
        self.embedding_size = embedding_size
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.layer_type = layer_type
        self.hidden_act = hidden_act
        self.downsample_in_first_stage = downsample_in_first_stage
        self.stage_names = ['''stem'''] + [F'''stage{idx}''' for idx in range(1 , len(depths ) + 1 )]
        self._out_features , self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features , out_indices=out_indices , stage_names=self.stage_names )
class ResNetOnnxConfig ( OnnxConfig ):
    torch_onnx_minimum_version = version.parse("1.11" )
@property
    def inputs( self ) -> Mapping[str, Mapping[int, str]]:
return OrderedDict(
[
('''pixel_values''', {0: '''batch''', 1: '''num_channels''', 2: '''height''', 3: '''width'''}),
] )
@property
    def atol_for_validation( self ) -> float:
return 1e-3
| 33 |
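A short usage sketch for the config above: out_features selects which stage feature maps a backbone exposes, and stage_names is derived from the depths list (this assumes the repaired ResNetConfig name and the BackboneConfigMixin behavior it mirrors from transformers):
config = ResNetConfig(depths=[2, 2, 2, 2], out_features=["stage2", "stage4"])
print(config.stage_names)   # ['stem', 'stage1', 'stage2', 'stage3', 'stage4']
print(config.out_features)  # ['stage2', 'stage4']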
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
'''configuration_distilbert''': [
'''DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''DistilBertConfig''',
'''DistilBertOnnxConfig''',
],
'''tokenization_distilbert''': ['''DistilBertTokenizer'''],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_distilbert_fast"] = ['''DistilBertTokenizerFast''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_distilbert"] = [
'''DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''DistilBertForMaskedLM''',
'''DistilBertForMultipleChoice''',
'''DistilBertForQuestionAnswering''',
'''DistilBertForSequenceClassification''',
'''DistilBertForTokenClassification''',
'''DistilBertModel''',
'''DistilBertPreTrainedModel''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_distilbert"] = [
'''TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFDistilBertForMaskedLM''',
'''TFDistilBertForMultipleChoice''',
'''TFDistilBertForQuestionAnswering''',
'''TFDistilBertForSequenceClassification''',
'''TFDistilBertForTokenClassification''',
'''TFDistilBertMainLayer''',
'''TFDistilBertModel''',
'''TFDistilBertPreTrainedModel''',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_distilbert"] = [
'''FlaxDistilBertForMaskedLM''',
'''FlaxDistilBertForMultipleChoice''',
'''FlaxDistilBertForQuestionAnswering''',
'''FlaxDistilBertForSequenceClassification''',
'''FlaxDistilBertForTokenClassification''',
'''FlaxDistilBertModel''',
'''FlaxDistilBertPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_distilbert import (
DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
DistilBertConfig,
DistilBertOnnxConfig,
)
from .tokenization_distilbert import DistilBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_distilbert_fast import DistilBertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_distilbert import (
DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
DistilBertForMaskedLM,
DistilBertForMultipleChoice,
DistilBertForQuestionAnswering,
DistilBertForSequenceClassification,
DistilBertForTokenClassification,
DistilBertModel,
DistilBertPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_distilbert import (
TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFDistilBertForMaskedLM,
TFDistilBertForMultipleChoice,
TFDistilBertForQuestionAnswering,
TFDistilBertForSequenceClassification,
TFDistilBertForTokenClassification,
TFDistilBertMainLayer,
TFDistilBertModel,
TFDistilBertPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_distilbert import (
FlaxDistilBertForMaskedLM,
FlaxDistilBertForMultipleChoice,
FlaxDistilBertForQuestionAnswering,
FlaxDistilBertForSequenceClassification,
FlaxDistilBertForTokenClassification,
FlaxDistilBertModel,
FlaxDistilBertPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 264 | 0 |
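The _import_structure pattern above defers heavy imports until an attribute is first accessed. A minimal sketch of the same idea using plain PEP 562 module-level __getattr__, independent of the transformers helper and with hypothetical module names:
# lazy_pkg/__init__.py
import importlib
_import_structure = {"heavy_module": ["HeavyClass"]}
def __getattr__(name):
    for module_name, symbols in _import_structure.items():
        if name in symbols:
            module = importlib.import_module(f".{module_name}", __name__)
            return getattr(module, name)
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")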
'''simple docstring'''
from argparse import ArgumentParser
from datasets.commands.convert import ConvertCommand
from datasets.commands.dummy_data import DummyDataCommand
from datasets.commands.env import EnvironmentCommand
from datasets.commands.run_beam import RunBeamCommand
from datasets.commands.test import TestCommand
from datasets.utils.logging import set_verbosity_info
def parse_unknown_args (unknown_args ):
    return {key.lstrip('''-''' ): value for key, value in zip(unknown_args[::2] , unknown_args[1::2] )}
def main ():
    parser = ArgumentParser(
        '''HuggingFace Datasets CLI tool''' , usage='''datasets-cli <command> [<args>]''' , allow_abbrev=False )
    commands_parser = parser.add_subparsers(help='''datasets-cli command helpers''' )
    set_verbosity_info()
    # Register commands
    ConvertCommand.register_subcommand(commands_parser )
    EnvironmentCommand.register_subcommand(commands_parser )
    TestCommand.register_subcommand(commands_parser )
    RunBeamCommand.register_subcommand(commands_parser )
    DummyDataCommand.register_subcommand(commands_parser )
    # Parse args
    args , unknown_args = parser.parse_known_args()
    if not hasattr(args , '''func''' ):
        parser.print_help()
        exit(1 )
    kwargs = parse_unknown_args(unknown_args )
    # Run
    service = args.func(args , **kwargs )
    service.run()
if __name__ == "__main__":
main()
| 34 |
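The parse_unknown_args helper above pairs alternating flag/value tokens left over by parse_known_args. A quick illustration of the zip trick it relies on:
unknown_args = ["--num_proc", "8", "--cache_dir", "/tmp/cache"]
parsed = {key.lstrip("-"): value for key, value in zip(unknown_args[::2], unknown_args[1::2])}
print(parsed)  # {'num_proc': '8', 'cache_dir': '/tmp/cache'}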
"""simple docstring"""
import argparse
import json
from pathlib import Path
import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from timm.data import resolve_data_config
from timm.data.transforms_factory import create_transform
from transformers import (
BitConfig,
ViTHybridConfig,
ViTHybridForImageClassification,
ViTHybridImageProcessor,
ViTHybridModel,
)
from transformers.image_utils import PILImageResampling
from transformers.utils import logging
logging.set_verbosity_info()
lowercase__ : Dict = logging.get_logger(__name__)
def create_rename_keys ( config , base_model=False ):
    rename_keys = []
# fmt: off
# stem:
rename_keys.append(('''cls_token''', '''vit.embeddings.cls_token''') )
rename_keys.append(('''pos_embed''', '''vit.embeddings.position_embeddings''') )
rename_keys.append(('''patch_embed.proj.weight''', '''vit.embeddings.patch_embeddings.projection.weight''') )
rename_keys.append(('''patch_embed.proj.bias''', '''vit.embeddings.patch_embeddings.projection.bias''') )
# backbone
rename_keys.append(('''patch_embed.backbone.stem.conv.weight''', '''vit.embeddings.patch_embeddings.backbone.bit.embedder.convolution.weight''') )
rename_keys.append(('''patch_embed.backbone.stem.norm.weight''', '''vit.embeddings.patch_embeddings.backbone.bit.embedder.norm.weight''') )
rename_keys.append(('''patch_embed.backbone.stem.norm.bias''', '''vit.embeddings.patch_embeddings.backbone.bit.embedder.norm.bias''') )
for stage_idx in range(len(config.backbone_config.depths ) ):
for layer_idx in range(config.backbone_config.depths[stage_idx] ):
rename_keys.append((f"patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.conv1.weight", f"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.conv1.weight") )
rename_keys.append((f"patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm1.weight", f"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm1.weight") )
rename_keys.append((f"patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm1.bias", f"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm1.bias") )
rename_keys.append((f"patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.conv2.weight", f"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.conv2.weight") )
rename_keys.append((f"patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm2.weight", f"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm2.weight") )
rename_keys.append((f"patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm2.bias", f"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm2.bias") )
rename_keys.append((f"patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.conv3.weight", f"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.conv3.weight") )
rename_keys.append((f"patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm3.weight", f"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm3.weight") )
rename_keys.append((f"patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm3.bias", f"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm3.bias") )
rename_keys.append((f"patch_embed.backbone.stages.{stage_idx}.blocks.0.downsample.conv.weight", f"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.0.downsample.conv.weight") )
rename_keys.append((f"patch_embed.backbone.stages.{stage_idx}.blocks.0.downsample.norm.weight", f"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.0.downsample.norm.weight") )
rename_keys.append((f"patch_embed.backbone.stages.{stage_idx}.blocks.0.downsample.norm.bias", f"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.0.downsample.norm.bias") )
# transformer encoder
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((f"blocks.{i}.norm1.weight", f"vit.encoder.layer.{i}.layernorm_before.weight") )
rename_keys.append((f"blocks.{i}.norm1.bias", f"vit.encoder.layer.{i}.layernorm_before.bias") )
rename_keys.append((f"blocks.{i}.attn.proj.weight", f"vit.encoder.layer.{i}.attention.output.dense.weight") )
rename_keys.append((f"blocks.{i}.attn.proj.bias", f"vit.encoder.layer.{i}.attention.output.dense.bias") )
rename_keys.append((f"blocks.{i}.norm2.weight", f"vit.encoder.layer.{i}.layernorm_after.weight") )
rename_keys.append((f"blocks.{i}.norm2.bias", f"vit.encoder.layer.{i}.layernorm_after.bias") )
rename_keys.append((f"blocks.{i}.mlp.fc1.weight", f"vit.encoder.layer.{i}.intermediate.dense.weight") )
rename_keys.append((f"blocks.{i}.mlp.fc1.bias", f"vit.encoder.layer.{i}.intermediate.dense.bias") )
rename_keys.append((f"blocks.{i}.mlp.fc2.weight", f"vit.encoder.layer.{i}.output.dense.weight") )
rename_keys.append((f"blocks.{i}.mlp.fc2.bias", f"vit.encoder.layer.{i}.output.dense.bias") )
if base_model:
# layernorm + pooler
rename_keys.extend(
[
('''norm.weight''', '''layernorm.weight'''),
('''norm.bias''', '''layernorm.bias'''),
('''pre_logits.fc.weight''', '''pooler.dense.weight'''),
('''pre_logits.fc.bias''', '''pooler.dense.bias'''),
] )
# if just the base model, we should remove "vit" from all keys that start with "vit"
        rename_keys = [(pair[0], pair[1][4:]) if pair[1].startswith('''vit''' ) else pair for pair in rename_keys]
else:
# layernorm + classification head
rename_keys.extend(
[
('''norm.weight''', '''vit.layernorm.weight'''),
('''norm.bias''', '''vit.layernorm.bias'''),
('''head.weight''', '''classifier.weight'''),
('''head.bias''', '''classifier.bias'''),
] )
# fmt: on
return rename_keys
def read_in_q_k_v ( state_dict , config , base_model=False ):
    for i in range(config.num_hidden_layers ):
        if base_model:
            prefix = ''''''
        else:
            prefix = '''vit.'''
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"blocks.{i}.attn.qkv.weight" )
        in_proj_bias = state_dict.pop(f"blocks.{i}.attn.qkv.bias" )
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[
            : config.hidden_size, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.bias"] = in_proj_bias[: config.hidden_size]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.bias"] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.bias"] = in_proj_bias[-config.hidden_size :]
def remove_classification_head_ ( state_dict ):
    ignore_keys = ['''head.weight''', '''head.bias''']
    for k in ignore_keys:
        state_dict.pop(k , None )
def rename_key ( dct , old , new ):
    val = dct.pop(old )
    dct[new] = val
def prepare_img ():
    url = '''http://images.cocodataset.org/val2017/000000039769.jpg'''
    im = Image.open(requests.get(url , stream=True ).raw )
    return im
@torch.no_grad()
def convert_vit_checkpoint ( vit_name , pytorch_dump_folder_path , push_to_hub=False ):
snake_case_ : str = BitConfig(
global_padding='''same''' , layer_type='''bottleneck''' , depths=(3, 4, 9) , out_features=['''stage3'''] , embedding_dynamic_padding=_a , )
snake_case_ : Tuple = ViTHybridConfig(backbone_config=_a , image_size=384 , num_labels=1_000 )
snake_case_ : int = False
# load original model from timm
snake_case_ : str = timm.create_model(_a , pretrained=_a )
timm_model.eval()
# load state_dict of original model, remove and rename some keys
snake_case_ : Any = timm_model.state_dict()
if base_model:
remove_classification_head_(_a )
snake_case_ : int = create_rename_keys(_a , _a )
for src, dest in rename_keys:
rename_key(_a , _a , _a )
read_in_q_k_v(_a , _a , _a )
snake_case_ : Optional[Any] = '''huggingface/label-files'''
snake_case_ : Any = '''imagenet-1k-id2label.json'''
snake_case_ : Dict = json.load(open(hf_hub_download(_a , _a , repo_type='''dataset''' ) , '''r''' ) )
snake_case_ : Dict = {int(_a ): v for k, v in idalabel.items()}
snake_case_ : Optional[int] = idalabel
snake_case_ : Optional[Any] = {v: k for k, v in idalabel.items()}
# load HuggingFace model
if vit_name[-5:] == "in21k":
snake_case_ : Optional[Any] = ViTHybridModel(_a ).eval()
else:
snake_case_ : Any = ViTHybridForImageClassification(_a ).eval()
model.load_state_dict(_a )
# create image processor
snake_case_ : Optional[Any] = create_transform(**resolve_data_config({} , model=_a ) )
snake_case_ : List[Any] = transform.transforms
snake_case_ : Optional[Any] = {
'''bilinear''': PILImageResampling.BILINEAR,
'''bicubic''': PILImageResampling.BICUBIC,
'''nearest''': PILImageResampling.NEAREST,
}
snake_case_ : List[Any] = ViTHybridImageProcessor(
do_resize=_a , size={'''shortest_edge''': timm_transforms[0].size} , resample=pillow_resamplings[timm_transforms[0].interpolation.value] , do_center_crop=_a , crop_size={'''height''': timm_transforms[1].size[0], '''width''': timm_transforms[1].size[1]} , do_normalize=_a , image_mean=timm_transforms[-1].mean.tolist() , image_std=timm_transforms[-1].std.tolist() , )
snake_case_ : Optional[int] = prepare_img()
snake_case_ : Optional[int] = transform(_a ).unsqueeze(0 )
snake_case_ : int = processor(_a , return_tensors='''pt''' ).pixel_values
# verify pixel values
assert torch.allclose(_a , _a )
# verify logits
with torch.no_grad():
snake_case_ : List[str] = model(_a )
snake_case_ : Any = outputs.logits
print('''Predicted class:''' , logits.argmax(-1 ).item() )
if base_model:
snake_case_ : Optional[Any] = timm_model.forward_features(_a )
assert timm_pooled_output.shape == outputs.pooler_output.shape
assert torch.allclose(_a , outputs.pooler_output , atol=1E-3 )
else:
snake_case_ : int = timm_model(_a )
assert timm_logits.shape == outputs.logits.shape
assert torch.allclose(_a , outputs.logits , atol=1E-3 )
print('''Looks ok!''' )
if pytorch_dump_folder_path is not None:
Path(_a ).mkdir(exist_ok=_a )
print(f"Saving model {vit_name} to {pytorch_dump_folder_path}" )
model.save_pretrained(_a )
print(f"Saving processor to {pytorch_dump_folder_path}" )
processor.save_pretrained(_a )
if push_to_hub:
print(f"Pushing model and processor to the hub {vit_name}" )
model.push_to_hub(f"ybelkada/{vit_name}" )
processor.push_to_hub(f"ybelkada/{vit_name}" )
if __name__ == "__main__":
lowercase__ : int = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--vit_name''',
default='''vit_base_r50_s16_384''',
type=str,
help='''Name of the hybrid ViT timm model you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.'''
)
parser.add_argument(
'''--push_to_hub''', action='''store_true''', help='''Whether to upload the model to the HuggingFace hub.'''
)
lowercase__ : Any = parser.parse_args()
convert_vit_checkpoint(args.vit_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 264 | 0 |
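The read_in_q_k_v step above splits timm's fused qkv projection into separate query/key/value tensors by slicing thirds of the leading dimension. A standalone sketch of that slicing with toy sizes:
import torch
hidden_size = 4
fused = torch.arange(3 * hidden_size * hidden_size, dtype=torch.float32).reshape(3 * hidden_size, hidden_size)
q = fused[:hidden_size, :]                   # rows 0 .. h-1
k = fused[hidden_size : hidden_size * 2, :]  # rows h .. 2h-1
v = fused[-hidden_size:, :]                  # rows 2h .. 3h-1
assert torch.equal(torch.cat([q, k, v]), fused)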
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
is_vision_available,
)
_import_structure = {"configuration_vit": ["VIT_PRETRAINED_CONFIG_ARCHIVE_MAP", "ViTConfig", "ViTOnnxConfig"]}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["feature_extraction_vit"] = ["ViTFeatureExtractor"]
    _import_structure["image_processing_vit"] = ["ViTImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_vit"] = [
"VIT_PRETRAINED_MODEL_ARCHIVE_LIST",
"ViTForImageClassification",
"ViTForMaskedImageModeling",
"ViTModel",
"ViTPreTrainedModel",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_vit"] = [
"TFViTForImageClassification",
"TFViTModel",
"TFViTPreTrainedModel",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_vit"] = [
"FlaxViTForImageClassification",
"FlaxViTModel",
"FlaxViTPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_vit import VIT_PRETRAINED_CONFIG_ARCHIVE_MAP, ViTConfig, ViTOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_vit import ViTFeatureExtractor
from .image_processing_vit import ViTImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vit import (
VIT_PRETRAINED_MODEL_ARCHIVE_LIST,
ViTForImageClassification,
ViTForMaskedImageModeling,
ViTModel,
ViTPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_vit import TFViTForImageClassification, TFViTModel, TFViTPreTrainedModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_vit import FlaxViTForImageClassification, FlaxViTModel, FlaxViTPreTrainedModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 35 |
"""simple docstring"""
import argparse
import json
import os
import re
import torch
from transformers import BloomConfig, BloomModel
from transformers.file_utils import CONFIG_NAME, WEIGHTS_NAME
from transformers.utils import logging
logging.set_verbosity_info()
lowercase__ : Dict = [
'''word_embeddings_layernorm.weight''',
'''word_embeddings_layernorm.bias''',
'''input_layernorm.weight''',
'''input_layernorm.bias''',
'''post_attention_layernorm.weight''',
'''post_attention_layernorm.bias''',
'''self_attention.dense.bias''',
'''mlp.dense_4h_to_h.bias''',
'''ln_f.weight''',
'''ln_f.bias''',
]
lowercase__ : str = [
'''mlp.dense_4h_to_h.weight''',
'''self_attention.dense.weight''',
]
def layer_name_mapping ( key , file ):
    layer_rename_map = {
        '''word_embeddings.weight''': '''word_embeddings.weight''',
        '''word_embeddings.norm.weight''': '''word_embeddings_layernorm.weight''',
        '''word_embeddings.norm.bias''': '''word_embeddings_layernorm.bias''',
        '''weight''': '''ln_f.weight''',
        '''bias''': '''ln_f.bias''',
    }
    if key in layer_rename_map:
        return layer_rename_map[key]
    # Handle transformer blocks
    layer_number = int(re.match(r'''.*layer_(\d*).*''' , file )[1] )
    layer_number -= 3
    return f"h.{layer_number}." + key
def get_dtype_size ( dtype ):
    if dtype == torch.bool:
        return 1 / 8
    bit_search = re.search(r'''[^\d](\d+)$''' , str(dtype ) )
    if bit_search is None:
        raise ValueError(f"`dtype` is not a valid dtype: {dtype}." )
    bit_size = int(bit_search.groups()[0] )
    return bit_size // 8
def convert_bloom_checkpoint_to_pytorch ( bloom_checkpoint_path , bloom_config_file , pytorch_dump_folder_path , shard_model , pretraining_tp ):
# Construct model
if bloom_config_file == "":
snake_case_ : int = BloomConfig()
else:
snake_case_ : List[str] = BloomConfig.from_json_file(_a )
if shard_model:
        file_names = os.listdir(bloom_checkpoint_path )
        file_names = sorted(filter(lambda s : s.startswith('''layer''' ) and "model_00" in s , file_names ) )
snake_case_ : List[str] = {'''weight_map''': {}, '''metadata''': {}}
snake_case_ : Any = 0
snake_case_ : Union[str, Any] = None
snake_case_ : List[str] = BloomConfig()
for j, file in enumerate(_a ):
print('''Processing file: {}'''.format(_a ) )
snake_case_ : Dict = None
for i in range(_a ):
# load all TP files
snake_case_ : Union[str, Any] = file.replace('''model_00''' , f"model_0{i}" )
snake_case_ : List[str] = torch.load(os.path.join(_a , _a ) , map_location='''cpu''' )
# Rename keys in the transformers names
snake_case_ : str = list(temp.keys() )
for key in keys:
snake_case_ : Any = temp.pop(_a )
if tensors is None:
snake_case_ : Any = temp
else:
for key in tensors.keys():
if any(key.endswith(_a ) for end in WEIGHTS_TO_AVERAGE_ENDSWITH ):
# We average (sum and then divide) some weights accross TP ranks (see https://github.com/bigscience-workshop/Megatron-DeepSpeed/blob/olruwase/sync_layer_norms/megatron/training.py#L425)
tensors[key] += temp[key]
else:
# Some weights are RowParallelLinear in Megatron-Deepspeed, others are ColumnParallel
snake_case_ : Tuple = 1 if any(text in key for text in WEIGHTS_WITH_ROW_PARALLELISM_CONTAIN ) else 0
# We concatenate these weights accross TP ranks
snake_case_ : List[str] = torch.cat([tensors[key], temp[key]] , dim=_a )
# Divide by the number of TP the weights we want to average
for key in tensors.keys():
if any(key.endswith(_a ) for end in WEIGHTS_TO_AVERAGE_ENDSWITH ):
snake_case_ : Any = tensors[key] / pretraining_tp
torch.save(
_a , os.path.join(
_a , '''pytorch_model_{}-of-{}.bin'''.format(str(j + 1 ).zfill(5 ) , str(len(_a ) ).zfill(5 ) ) , ) , )
for key in tensors.keys():
snake_case_ : List[str] = tensors[key]
total_size += value.numel() * get_dtype_size(value.dtype )
if key not in index_dict["weight_map"]:
snake_case_ : List[str] = '''pytorch_model_{}-of-{}.bin'''.format(
str(j + 1 ).zfill(5 ) , str(len(_a ) ).zfill(5 ) )
snake_case_ : int = BloomConfig()
snake_case_ : Any = pytorch_dump_folder_path + '''/''' + CONFIG_NAME
snake_case_ : Dict = total_size
with open(_a , '''w''' , encoding='''utf-8''' ) as f:
f.write(config.to_json_string() )
with open(os.path.join(_a , WEIGHTS_NAME + '''.index.json''' ) , '''w''' , encoding='''utf-8''' ) as f:
snake_case_ : Tuple = json.dumps(_a , indent=2 , sort_keys=_a ) + '''\n'''
f.write(_a )
else:
snake_case_ : Union[str, Any] = BloomModel(_a )
        file_names = os.listdir(bloom_checkpoint_path )
        file_names = sorted(filter(lambda s : s.startswith('''layer''' ) and "model_00" in s , file_names ) )
snake_case_ : List[Any] = None
for i, file in enumerate(_a ):
snake_case_ : Optional[Any] = None
for i in range(_a ):
# load all TP files
snake_case_ : List[str] = file.replace('''model_00''' , f"model_0{i}" )
snake_case_ : Optional[Any] = torch.load(os.path.join(_a , _a ) , map_location='''cpu''' )
# Rename keys in the transformers names
snake_case_ : str = list(temp.keys() )
for key in keys:
snake_case_ : str = temp.pop(_a )
if tensors is None:
snake_case_ : int = temp
else:
for key in tensors.keys():
# We average (sum and then divide) some weights accross TP ranks (see https://github.com/bigscience-workshop/Megatron-DeepSpeed/blob/olruwase/sync_layer_norms/megatron/training.py#L425)
if any(key.endswith(_a ) for end in WEIGHTS_TO_AVERAGE_ENDSWITH ):
tensors[key] += temp[key]
else:
# Some weights are RowParallelLinear in Megatron-Deepspeed, others are ColumnParallel
snake_case_ : Tuple = 1 if any(text in key for text in WEIGHTS_WITH_ROW_PARALLELISM_CONTAIN ) else 0
# We concatenate these weights accross TP ranks
snake_case_ : Optional[Any] = torch.cat([tensors[key], temp[key]] , dim=_a )
# Divide by the number of TP the weights we want to average
for key in tensors.keys():
if any(key.endswith(_a ) for end in WEIGHTS_TO_AVERAGE_ENDSWITH ):
snake_case_ : Union[str, Any] = tensors[key] / pretraining_tp
snake_case_ : Any = model.load_state_dict(_a , strict=_a )
assert not other_keys.unexpected_keys, f"The keys {other_keys.unexpected_keys} are unexpected"
if missing_keys is None:
snake_case_ : Optional[int] = set(other_keys.missing_keys )
else:
snake_case_ : Tuple = missing_keys.intersection(set(other_keys.missing_keys ) )
assert not missing_keys, f"The keys {missing_keys} are missing"
# Save pytorch-model
os.makedirs(_a , exist_ok=_a )
snake_case_ : List[str] = pytorch_dump_folder_path + '''/''' + WEIGHTS_NAME
snake_case_ : Optional[Any] = pytorch_dump_folder_path + '''/''' + CONFIG_NAME
print(f"Save PyTorch model to {pytorch_weights_dump_path} with dtype {config.torch_dtype}" )
if config.torch_dtype is not None:
snake_case_ : Optional[Any] = model.to(config.torch_dtype )
torch.save(model.state_dict() , _a )
print(f"Save configuration file to {pytorch_config_dump_path}" )
with open(_a , '''w''' , encoding='''utf-8''' ) as f:
f.write(config.to_json_string() )
if __name__ == "__main__":
lowercase__ : str = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--bloom_checkpoint_path''',
default=None,
type=str,
required=True,
help='''Path to the Megatron-LM checkpoint path.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
parser.add_argument(
'''--bloom_config_file''',
default='''''',
type=str,
help=(
'''An optional config json file corresponding to the pre-trained model. \n'''
'''This specifies the model architecture.'''
),
)
parser.add_argument(
'''--shard_model''',
action='''store_true''',
help='''An optional setting to shard the output model \nThis enables sharding the converted checkpoint''',
)
parser.add_argument(
'''--pretraining_tp''',
default=4,
type=int,
help='''Pretraining TP rank that has been used when training the model in Megatron-LM \n''',
)
lowercase__ : List[Any] = parser.parse_args()
convert_bloom_checkpoint_to_pytorch(
args.bloom_checkpoint_path,
args.bloom_config_file,
args.pytorch_dump_folder_path,
args.shard_model,
args.pretraining_tp,
)
| 264 | 0 |
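The merge rule in the converter above: parameters that are replicated across tensor-parallel ranks (layer norms, most biases) are averaged, while partitioned linear weights are concatenated along the split axis. A toy sketch of both cases:
import torch
tp_shards = [torch.ones(2, 3), 3 * torch.ones(2, 3)]
averaged = sum(tp_shards) / len(tp_shards)     # replicated params: sum, then divide by TP degree
row_parallel = torch.cat(tp_shards, dim=1)     # e.g. mlp.dense_4h_to_h.weight
column_parallel = torch.cat(tp_shards, dim=0)  # column-parallel projections
print(averaged.shape, row_parallel.shape, column_parallel.shape)
# torch.Size([2, 3]) torch.Size([2, 6]) torch.Size([4, 3])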
_snake_case = [
(1000, "M"),
(900, "CM"),
(500, "D"),
(400, "CD"),
(100, "C"),
(90, "XC"),
(50, "L"),
(40, "XL"),
(10, "X"),
(9, "IX"),
(5, "V"),
(4, "IV"),
(1, "I"),
]
def roman_to_int ( roman ):
    '''simple docstring'''
    vals = {"I": 1, "V": 5, "X": 10, "L": 50, "C": 100, "D": 500, "M": 1_000}
    total = 0
    place = 0
    while place < len(roman ):
        if (place + 1 < len(roman )) and (vals[roman[place]] < vals[roman[place + 1]]):
            total += vals[roman[place + 1]] - vals[roman[place]]
            place += 2
        else:
            total += vals[roman[place]]
            place += 1
    return total
def int_to_roman ( number ):
    '''simple docstring'''
    result = []
    for arabic, roman in ROMAN:
        factor , number = divmod(number , arabic )
        result.append(roman * factor )
        if number == 0:
            break
    return "".join(result )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 36 |
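A round-trip check for the two conversions above, assuming the repaired roman_to_int / int_to_roman names:
assert roman_to_int("MMXXIV") == 2024
assert int_to_roman(2024) == "MMXXIV"
assert all(roman_to_int(int_to_roman(n)) == n for n in range(1, 4000))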
"""simple docstring"""
def jaccard_similarity ( set_a , set_b , alternative_union=False ):
    if isinstance(set_a , set ) and isinstance(set_b , set ):
        intersection = len(set_a.intersection(set_b ) )
        if alternative_union:
            union = len(set_a ) + len(set_b )
        else:
            union = len(set_a.union(set_b ) )
        return intersection / union
    if isinstance(set_a , (list, tuple) ) and isinstance(set_b , (list, tuple) ):
        intersection = [element for element in set_a if element in set_b]
        if alternative_union:
            union = len(set_a ) + len(set_b )
            return len(intersection ) / union
        else:
            union = set_a + [element for element in set_b if element not in set_a]
            return len(intersection ) / len(union )
    return None
if __name__ == "__main__":
    set_a = {'''a''', '''b''', '''c''', '''d''', '''e'''}
    set_b = {'''c''', '''d''', '''e''', '''f''', '''h''', '''i'''}
print(jaccard_similarity(set_a, set_b))
| 264 | 0 |
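A worked example for the function above: the intersection is {'c', 'd', 'e'} so |A ∩ B| = 3, and |A ∪ B| = 8, giving J(A, B) = 3/8 = 0.375; with alternative_union the denominator becomes |A| + |B| = 11 and the result is 3/11.
set_a = {"a", "b", "c", "d", "e"}
set_b = {"c", "d", "e", "f", "h", "i"}
print(jaccard_similarity(set_a, set_b))                          # 0.375
print(jaccard_similarity(set_a, set_b, alternative_union=True))  # 0.2727272727272727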
'''simple docstring'''
import copy
import json
import os
import tempfile
from transformers import is_torch_available
from .test_configuration_utils import config_common_kwargs
class ConfigTester( object ):
    '''simple docstring'''
    def __init__( self , parent , config_class=None , has_text_modality=True , common_properties=None , **kwargs ):
        self.parent = parent
        self.config_class = config_class
        self.has_text_modality = has_text_modality
        self.inputs_dict = kwargs
        self.common_properties = common_properties
    def create_and_test_config_common_properties( self ):
        config = self.config_class(**self.inputs_dict )
        common_properties = (
            ["""hidden_size""", """num_attention_heads""", """num_hidden_layers"""]
            if self.common_properties is None
            else self.common_properties
        )
        # Add common fields for text models
        if self.has_text_modality:
            common_properties.extend(["""vocab_size"""] )
        # Test that config has the common properties as getters
        for prop in common_properties:
            self.parent.assertTrue(hasattr(config , prop ) , msg=F"""`{prop}` does not exist""" )
        # Test that config has the common properties as setter
        for idx, name in enumerate(common_properties ):
            try:
                setattr(config , name , idx )
                self.parent.assertEqual(
                    getattr(config , name ) , idx , msg=F"""`{name} value {idx} expected, but was {getattr(config , name )}""" )
            except NotImplementedError:
                # Some models might not be able to implement setters for common_properties
                # In that case, a NotImplementedError is raised
                pass
        # Test if config class can be called with Config(prop_name=..)
        for idx, name in enumerate(common_properties ):
            try:
                config = self.config_class(**{name: idx} )
                self.parent.assertEqual(
                    getattr(config , name ) , idx , msg=F"""`{name} value {idx} expected, but was {getattr(config , name )}""" )
            except NotImplementedError:
                # Some models might not be able to implement setters for common_properties
                # In that case, a NotImplementedError is raised
                pass
    def create_and_test_config_to_json_string( self ):
        config = self.config_class(**self.inputs_dict )
        obj = json.loads(config.to_json_string() )
        for key, value in self.inputs_dict.items():
            self.parent.assertEqual(obj[key] , value )
    def create_and_test_config_to_json_file( self ):
        config_first = self.config_class(**self.inputs_dict )
        with tempfile.TemporaryDirectory() as tmpdirname:
            json_file_path = os.path.join(tmpdirname , """config.json""" )
            config_first.to_json_file(json_file_path )
            config_second = self.config_class.from_json_file(json_file_path )
        self.parent.assertEqual(config_second.to_dict() , config_first.to_dict() )
    def create_and_test_config_from_and_save_pretrained( self ):
        config_first = self.config_class(**self.inputs_dict )
        with tempfile.TemporaryDirectory() as tmpdirname:
            config_first.save_pretrained(tmpdirname )
            config_second = self.config_class.from_pretrained(tmpdirname )
        self.parent.assertEqual(config_second.to_dict() , config_first.to_dict() )
    def create_and_test_config_from_and_save_pretrained_subfolder( self ):
        config_first = self.config_class(**self.inputs_dict )
        subfolder = """test"""
        with tempfile.TemporaryDirectory() as tmpdirname:
            sub_tmpdirname = os.path.join(tmpdirname , subfolder )
            config_first.save_pretrained(sub_tmpdirname )
            config_second = self.config_class.from_pretrained(tmpdirname , subfolder=subfolder )
        self.parent.assertEqual(config_second.to_dict() , config_first.to_dict() )
    def create_and_test_config_with_num_labels( self ):
        config = self.config_class(**self.inputs_dict , num_labels=5 )
        self.parent.assertEqual(len(config.id2label ) , 5 )
        self.parent.assertEqual(len(config.label2id ) , 5 )
        config.num_labels = 3
        self.parent.assertEqual(len(config.id2label ) , 3 )
        self.parent.assertEqual(len(config.label2id ) , 3 )
    def check_config_can_be_init_without_params( self ):
        if self.config_class.is_composition:
            return
        config = self.config_class()
        self.parent.assertIsNotNone(config )
    def check_config_arguments_init( self ):
        kwargs = copy.deepcopy(config_common_kwargs )
        config = self.config_class(**kwargs )
        wrong_values = []
for key, value in config_common_kwargs.items():
if key == "torch_dtype":
if not is_torch_available():
continue
else:
import torch
                if config.torch_dtype != torch.float16:
                    wrong_values.append(("""torch_dtype""", config.torch_dtype, torch.float16) )
            elif getattr(config , key ) != value:
                wrong_values.append((key, getattr(config , key ), value) )
        if len(wrong_values ) > 0:
            errors = """\n""".join([F"""- {v[0]}: got {v[1]} instead of {v[2]}""" for v in wrong_values] )
raise ValueError(F"""The following keys were not properly set in the config:\n{errors}""" )
    def run_common_tests( self ):
self.create_and_test_config_common_properties()
self.create_and_test_config_to_json_string()
self.create_and_test_config_to_json_file()
self.create_and_test_config_from_and_save_pretrained()
self.create_and_test_config_from_and_save_pretrained_subfolder()
self.create_and_test_config_with_num_labels()
self.check_config_can_be_init_without_params()
self.check_config_arguments_init()
| 37 |
"""simple docstring"""
import os
from datetime import datetime as dt
from github import Github
lowercase__ : int = [
'''good first issue''',
'''good second issue''',
'''good difficult issue''',
'''enhancement''',
'''new pipeline/model''',
'''new scheduler''',
'''wip''',
]
def main ():
    g = Github(os.environ['''GITHUB_TOKEN'''] )
    repo = g.get_repo('''huggingface/diffusers''' )
    open_issues = repo.get_issues(state='''open''' )
    for issue in open_issues:
        comments = sorted(issue.get_comments() , key=lambda i : i.created_at , reverse=True )
        last_comment = comments[0] if len(comments ) > 0 else None
if (
last_comment is not None
and last_comment.user.login == "github-actions[bot]"
and (dt.utcnow() - issue.updated_at).days > 7
and (dt.utcnow() - issue.created_at).days >= 30
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
):
# Closes the issue after 7 days of inactivity since the Stalebot notification.
issue.edit(state='''closed''' )
elif (
"stale" in issue.get_labels()
and last_comment is not None
and last_comment.user.login != "github-actions[bot]"
):
# Opens the issue if someone other than Stalebot commented.
issue.edit(state='''open''' )
issue.remove_from_labels('''stale''' )
elif (
(dt.utcnow() - issue.updated_at).days > 23
and (dt.utcnow() - issue.created_at).days >= 30
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
):
# Post a Stalebot notification after 23 days of inactivity.
issue.create_comment(
'''This issue has been automatically marked as stale because it has not had '''
'''recent activity. If you think this still needs to be addressed '''
'''please comment on this thread.\n\nPlease note that issues that do not follow the '''
'''[contributing guidelines](https://github.com/huggingface/diffusers/blob/main/CONTRIBUTING.md) '''
'''are likely to be ignored.''' )
issue.add_to_labels('''stale''' )
if __name__ == "__main__":
main()
| 264 | 0 |
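The script above interleaves the GitHub API with its timing rules. A minimal sketch of those rules, factored into a pure function so they can be unit-tested without any API calls; all names here are hypothetical, not part of the script.

from datetime import datetime, timedelta

def stale_action(updated_at, created_at, labels, last_comment_by_bot, exempt_labels, now=None):
    now = now or datetime.utcnow()
    inactive_days = (now - updated_at).days
    age_days = (now - created_at).days
    exempt = any(label.lower() in exempt_labels for label in labels)
    if last_comment_by_bot and inactive_days > 7 and age_days >= 30 and not exempt:
        return "close"        # stale notice was posted and nobody answered for a week
    if "stale" in labels and not last_comment_by_bot:
        return "unstale"      # a human replied after the bot
    if inactive_days > 23 and age_days >= 30 and not exempt:
        return "mark_stale"   # post the notification and add the `stale` label
    return "noop"

now = datetime.utcnow()
print(stale_action(now - timedelta(days=30), now - timedelta(days=90), [], True, ["wip"]))  # close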
from typing import Callable, List, Optional, Union
import PIL
import torch
from transformers import (
CLIPImageProcessor,
CLIPSegForImageSegmentation,
CLIPSegProcessor,
CLIPTextModel,
CLIPTokenizer,
)
from diffusers import DiffusionPipeline
from diffusers.configuration_utils import FrozenDict
from diffusers.models import AutoencoderKL, UNet2DConditionModel
from diffusers.pipelines.stable_diffusion import StableDiffusionInpaintPipeline
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
from diffusers.schedulers import DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler
from diffusers.utils import deprecate, is_accelerate_available, logging
logger = logging.get_logger(__name__) # pylint: disable=invalid-name
class TextInpainting( DiffusionPipeline ):
def __init__( self : Optional[int] , segmentation_model : CLIPSegForImageSegmentation , segmentation_processor : CLIPSegProcessor , vae : AutoencoderKL , text_encoder : CLIPTextModel , tokenizer : CLIPTokenizer , unet : UNet2DConditionModel , scheduler : Union[DDIMScheduler, PNDMScheduler, LMSDiscreteScheduler] , safety_checker : StableDiffusionSafetyChecker , feature_extractor : CLIPImageProcessor , ):
super().__init__()
if hasattr(scheduler.config , """steps_offset""" ) and scheduler.config.steps_offset != 1:
deprecation_message = (
F"""The configuration file of this scheduler: {scheduler} is outdated. `steps_offset`"""
F""" should be set to 1 instead of {scheduler.config.steps_offset}. Please make sure """
"""to update the config accordingly as leaving `steps_offset` might lead to incorrect results"""
""" in future versions. If you have downloaded this checkpoint from the Hugging Face Hub,"""
""" it would be very nice if you could open a Pull request for the `scheduler/scheduler_config.json`"""
""" file"""
)
deprecate("""steps_offset!=1""" , """1.0.0""" , deprecation_message , standard_warn=False )
new_config = dict(scheduler.config )
new_config["""steps_offset"""] = 1
scheduler._internal_dict = FrozenDict(new_config )
if hasattr(scheduler.config , """skip_prk_steps""" ) and scheduler.config.skip_prk_steps is False:
deprecation_message = (
F"""The configuration file of this scheduler: {scheduler} has not set the configuration"""
""" `skip_prk_steps`. `skip_prk_steps` should be set to True in the configuration file. Please make"""
""" sure to update the config accordingly as not setting `skip_prk_steps` in the config might lead to"""
""" incorrect results in future versions. If you have downloaded this checkpoint from the Hugging Face"""
""" Hub, it would be very nice if you could open a Pull request for the"""
""" `scheduler/scheduler_config.json` file"""
)
deprecate("""skip_prk_steps not set""" , """1.0.0""" , deprecation_message , standard_warn=False )
new_config = dict(scheduler.config )
new_config["""skip_prk_steps"""] = True
scheduler._internal_dict = FrozenDict(new_config )
if safety_checker is None:
logger.warning(
F"""You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure"""
""" that you abide to the conditions of the Stable Diffusion license and do not expose unfiltered"""
""" results in services or applications open to the public. Both the diffusers team and Hugging Face"""
""" strongly recommend to keep the safety filter enabled in all public facing circumstances, disabling"""
""" it only for use-cases that involve analyzing network behavior or auditing its results. For more"""
""" information, please have a look at https://github.com/huggingface/diffusers/pull/254 .""" )
self.register_modules(
segmentation_model=segmentation_model , segmentation_processor=segmentation_processor , vae=vae , text_encoder=text_encoder , tokenizer=tokenizer , unet=unet , scheduler=scheduler , safety_checker=safety_checker , feature_extractor=feature_extractor , )
def enable_attention_slicing( self : Dict , slice_size : Optional[Union[str, int]] = "auto" ):
if slice_size == "auto":
# half the attention head size is usually a good trade-off between
# speed and memory
slice_size = self.unet.config.attention_head_dim // 2
self.unet.set_attention_slice(slice_size )
def disable_attention_slicing( self : Union[str, Any] ):
self.enable_attention_slicing(None )
def enable_sequential_cpu_offload( self : Union[str, Any] ):
if is_accelerate_available():
from accelerate import cpu_offload
else:
raise ImportError("""Please install accelerate via `pip install accelerate`""" )
device = torch.device("""cuda""" )
for cpu_offloaded_model in [self.unet, self.text_encoder, self.vae, self.safety_checker]:
if cpu_offloaded_model is not None:
cpu_offload(cpu_offloaded_model , device )
@property
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
def _execution_device( self : Tuple ):
if self.device != torch.device("""meta""" ) or not hasattr(self.unet , """_hf_hook""" ):
return self.device
for module in self.unet.modules():
if (
hasattr(module , """_hf_hook""" )
and hasattr(module._hf_hook , """execution_device""" )
and module._hf_hook.execution_device is not None
):
return torch.device(module._hf_hook.execution_device )
return self.device
@torch.no_grad()
def __call__( self : Optional[Any] , prompt : Union[str, List[str]] , image : Union[torch.FloatTensor, PIL.Image.Image] , text : str , height : int = 512 , width : int = 512 , num_inference_steps : int = 50 , guidance_scale : float = 7.5 , negative_prompt : Optional[Union[str, List[str]]] = None , num_images_per_prompt : Optional[int] = 1 , eta : float = 0.0 , generator : Optional[torch.Generator] = None , latents : Optional[torch.FloatTensor] = None , output_type : Optional[str] = "pil" , return_dict : bool = True , callback : Optional[Callable[[int, int, torch.FloatTensor], None]] = None , callback_steps : int = 1 , **kwargs : List[Any] , ):
inputs = self.segmentation_processor(
text=[text] , images=[image] , padding="""max_length""" , return_tensors="""pt""" ).to(self.device )
outputs = self.segmentation_model(**inputs )
mask = torch.sigmoid(outputs.logits ).cpu().detach().unsqueeze(-1 ).numpy()
mask_pil = self.numpy_to_pil(mask )[0].resize(image.size )
# Run inpainting pipeline with the generated mask
inpainting_pipeline = StableDiffusionInpaintPipeline(
vae=self.vae , text_encoder=self.text_encoder , tokenizer=self.tokenizer , unet=self.unet , scheduler=self.scheduler , safety_checker=self.safety_checker , feature_extractor=self.feature_extractor , )
return inpainting_pipeline(
prompt=prompt , image=image , mask_image=mask_pil , height=height , width=width , num_inference_steps=num_inference_steps , guidance_scale=guidance_scale , negative_prompt=negative_prompt , num_images_per_prompt=num_images_per_prompt , eta=eta , generator=generator , latents=latents , output_type=output_type , return_dict=return_dict , callback=callback , callback_steps=callback_steps , )
| 38 |
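The pipeline above derives its inpainting mask from CLIPSeg logits with a sigmoid. A standalone sketch of that step; the 352x352 logits shape and target size are assumptions for illustration only.

import numpy as np
from PIL import Image

def logits_to_mask(logits: np.ndarray, size: tuple) -> Image.Image:
    probs = 1.0 / (1.0 + np.exp(-logits))            # sigmoid in plain numpy
    gray = (probs * 255.0).round().astype("uint8")   # [0, 1] -> [0, 255] grayscale
    return Image.fromarray(gray, mode="L").resize(size)

mask = logits_to_mask(np.random.randn(352, 352), (512, 512))
print(mask.size, mask.mode)  # (512, 512) L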
"""simple docstring"""
import argparse
import json
import numpy
import torch
from transformers.models.xlm.tokenization_xlm import VOCAB_FILES_NAMES
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging
logging.set_verbosity_info()
def convert_xlm_checkpoint_to_pytorch( xlm_checkpoint_path , pytorch_dump_folder_path ):
# Load checkpoint
chkpt = torch.load(xlm_checkpoint_path , map_location='''cpu''' )
state_dict = chkpt['''model''']
# We have the base model one level deeper than the original XLM repository
two_levels_state_dict = {}
for k, v in state_dict.items():
if "pred_layer" in k:
two_levels_state_dict[k] = v
else:
two_levels_state_dict['''transformer.''' + k] = v
config = chkpt['''params''']
config = {n: v for n, v in config.items() if not isinstance(v , (torch.FloatTensor, numpy.ndarray) )}
vocab = chkpt['''dico_word2id''']
vocab = {s + '''</w>''' if s.find('''@@''' ) == -1 and i > 13 else s.replace('''@@''' , '''''' ): i for s, i in vocab.items()}
# Save pytorch-model
pytorch_weights_dump_path = pytorch_dump_folder_path + '''/''' + WEIGHTS_NAME
pytorch_config_dump_path = pytorch_dump_folder_path + '''/''' + CONFIG_NAME
pytorch_vocab_dump_path = pytorch_dump_folder_path + '''/''' + VOCAB_FILES_NAMES['''vocab_file''']
print(f"Save PyTorch model to {pytorch_weights_dump_path}" )
torch.save(two_levels_state_dict , pytorch_weights_dump_path )
print(f"Save configuration file to {pytorch_config_dump_path}" )
with open(pytorch_config_dump_path , '''w''' , encoding='''utf-8''' ) as f:
f.write(json.dumps(config , indent=2 ) + '''\n''' )
print(f"Save vocab file to {pytorch_vocab_dump_path}" )
with open(pytorch_vocab_dump_path , '''w''' , encoding='''utf-8''' ) as f:
f.write(json.dumps(vocab , indent=2 ) + '''\n''' )
if __name__ == "__main__":
parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--xlm_checkpoint_path''', default=None, type=str, required=True, help='''Path the official PyTorch dump.'''
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
args = parser.parse_args()
convert_xlm_checkpoint_to_pytorch(args.xlm_checkpoint_path, args.pytorch_dump_folder_path)
| 264 | 0 |
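Before JSON-dumping the checkpoint params, the converter above filters out tensor-valued entries, since only plain scalars and strings are serializable. The same filtering step on a small stand-in dict:

import json
import numpy
import torch

params = {"n_layers": 12, "emb": torch.zeros(2), "mean": numpy.zeros(3), "name": "xlm"}
# keep only entries that json.dumps can handle
serializable = {k: v for k, v in params.items() if not isinstance(v, (torch.FloatTensor, numpy.ndarray))}
print(json.dumps(serializable, indent=2))  # {"n_layers": 12, "name": "xlm"}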
from __future__ import annotations
graph = {
'''A''': ['''B''', '''C''', '''E'''],
'''B''': ['''A''', '''D''', '''E'''],
'''C''': ['''A''', '''F''', '''G'''],
'''D''': ['''B'''],
'''E''': ['''A''', '''B''', '''D'''],
'''F''': ['''C'''],
'''G''': ['''C'''],
}
class Graph:
"""simple docstring"""
def __init__( self , graph , source_vertex ):
"""simple docstring"""
self.graph = graph
# mapping node to its parent in resulting breadth first tree
self.parent = {}
self.source_vertex = source_vertex
def breadth_first_search( self ):
"""simple docstring"""
visited = {self.source_vertex}
self.parent[self.source_vertex] = None
queue = [self.source_vertex] # first in first out queue
while queue:
vertex = queue.pop(0 )
for adjacent_vertex in self.graph[vertex]:
if adjacent_vertex not in visited:
visited.add(adjacent_vertex )
self.parent[adjacent_vertex] = vertex
queue.append(adjacent_vertex )
def shortest_path( self , target_vertex ):
"""simple docstring"""
if target_vertex == self.source_vertex:
return self.source_vertex
target_vertex_parent = self.parent.get(target_vertex )
if target_vertex_parent is None:
msg = (
F"""No path from vertex: {self.source_vertex} to vertex: {target_vertex}"""
)
raise ValueError(msg )
return self.shortest_path(target_vertex_parent ) + F"""->{target_vertex}"""
if __name__ == "__main__":
g = Graph(graph, '''G''')
g.breadth_first_search()
print(g.shortest_path('''D'''))
print(g.shortest_path('''G'''))
print(g.shortest_path('''Foo'''))
| 39 |
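The recursive shortest_path above can exhaust Python's recursion limit on very deep parent chains. An equivalent iterative reconstruction over the same parent mapping (the toy data below is purely illustrative):

def shortest_path_iterative(parent, source, target):
    # walk from the target back to the source via the BFS parent pointers
    path = [target]
    while path[-1] != source:
        prev = parent.get(path[-1])
        if prev is None:
            raise ValueError(f"No path from vertex: {source} to vertex: {target}")
        path.append(prev)
    return "->".join(reversed(path))

print(shortest_path_iterative({"B": "A", "D": "B"}, "A", "D"))  # A->B->D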
"""simple docstring"""
from . import __version__
# Backward compatibility imports, to make sure all those objects can be found in file_utils
from .utils import (
CLOUDFRONT_DISTRIB_PREFIX,
CONFIG_NAME,
DISABLE_TELEMETRY,
DUMMY_INPUTS,
DUMMY_MASK,
ENV_VARS_TRUE_AND_AUTO_VALUES,
ENV_VARS_TRUE_VALUES,
FEATURE_EXTRACTOR_NAME,
FLAX_WEIGHTS_NAME,
HF_MODULES_CACHE,
HUGGINGFACE_CO_PREFIX,
HUGGINGFACE_CO_RESOLVE_ENDPOINT,
MODEL_CARD_NAME,
MULTIPLE_CHOICE_DUMMY_INPUTS,
PYTORCH_PRETRAINED_BERT_CACHE,
PYTORCH_TRANSFORMERS_CACHE,
S3_BUCKET_PREFIX,
SENTENCEPIECE_UNDERLINE,
SPIECE_UNDERLINE,
TF2_WEIGHTS_NAME,
TF_WEIGHTS_NAME,
TORCH_FX_REQUIRED_VERSION,
TRANSFORMERS_CACHE,
TRANSFORMERS_DYNAMIC_MODULE_NAME,
USE_JAX,
USE_TF,
USE_TORCH,
WEIGHTS_INDEX_NAME,
WEIGHTS_NAME,
ContextManagers,
DummyObject,
EntryNotFoundError,
ExplicitEnum,
ModelOutput,
PaddingStrategy,
PushToHubMixin,
RepositoryNotFoundError,
RevisionNotFoundError,
TensorType,
_LazyModule,
add_code_sample_docstrings,
add_end_docstrings,
add_start_docstrings,
add_start_docstrings_to_model_forward,
cached_property,
copy_func,
default_cache_path,
define_sagemaker_information,
get_cached_models,
get_file_from_repo,
get_full_repo_name,
get_torch_version,
has_file,
http_user_agent,
is_apex_available,
is_bs4_available,
is_coloredlogs_available,
is_datasets_available,
is_detectron2_available,
is_faiss_available,
is_flax_available,
is_ftfy_available,
is_in_notebook,
is_ipex_available,
is_librosa_available,
is_offline_mode,
is_onnx_available,
is_pandas_available,
is_phonemizer_available,
is_protobuf_available,
is_psutil_available,
is_pyanvml_available,
is_pyctcdecode_available,
is_pytesseract_available,
is_pytorch_quantization_available,
is_rjieba_available,
is_sagemaker_dp_enabled,
is_sagemaker_mp_enabled,
is_scipy_available,
is_sentencepiece_available,
is_seqio_available,
is_sklearn_available,
is_soundfile_availble,
is_spacy_available,
is_speech_available,
is_tensor,
is_tensorflow_probability_available,
is_tf2onnx_available,
is_tf_available,
is_timm_available,
is_tokenizers_available,
is_torch_available,
is_torch_bf16_available,
is_torch_cuda_available,
is_torch_fx_available,
is_torch_fx_proxy,
is_torch_mps_available,
is_torch_tf32_available,
is_torch_tpu_available,
is_torchaudio_available,
is_training_run_on_sagemaker,
is_vision_available,
replace_return_docstrings,
requires_backends,
to_numpy,
to_py_obj,
torch_only_method,
)
| 264 | 0 |
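The block above is a backward-compatibility shim: every name is re-exported from .utils so older `from transformers.file_utils import ...` statements keep working. A minimal sketch of the same idea using a PEP 562 module-level __getattr__; module and attribute names here are hypothetical, not the transformers implementation.

import warnings

_MOVED = {"WEIGHTS_NAME": "pytorch_model.bin"}  # stand-in for the real relocated members

def __getattr__(name):
    # called only for attributes not found by normal module lookup
    if name in _MOVED:
        warnings.warn(f"{name} has moved; import it from the new module instead", FutureWarning)
        return _MOVED[name]
    raise AttributeError(name)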
"""simple docstring"""
import random
import unittest
import numpy as np
import transformers
from transformers import is_flax_available, is_torch_available
from transformers.testing_utils import is_pt_flax_cross_test, require_flax
if is_flax_available():
import os
import jax.numpy as jnp
from jax import jit
from transformers import AutoTokenizer, FlaxAutoModelForCausalLM
from transformers.modeling_flax_pytorch_utils import load_flax_weights_in_pytorch_model
os.environ["""XLA_PYTHON_CLIENT_MEM_FRACTION"""] = """0.12""" # assumed parallelism: 8
if is_torch_available():
import torch
def ids_tensor( shape , vocab_size , rng=None )-> List[Any]:
'''simple docstring'''
if rng is None:
rng = random.Random()
total_dims = 1
for dim in shape:
total_dims *= dim
values = []
for _ in range(total_dims ):
values.append(rng.randint(0 , vocab_size - 1 ) )
output = np.array(values , dtype=jnp.int32 ).reshape(shape )
return output
def random_attention_mask( shape , rng=None )-> List[str]:
'''simple docstring'''
attn_mask = ids_tensor(shape , vocab_size=2 , rng=rng )
# make sure that at least one token is attended to for each batch
attn_mask[:, -1] = 1
return attn_mask
@require_flax
class FlaxGenerationTesterMixin:
"""simple docstring"""
model_tester = None
all_generative_model_classes = ()
def _get_input_ids_and_config( self : Optional[int]):
config, inputs = self.model_tester.prepare_config_and_inputs_for_common()
# cut to half length & take max batch_size 3
max_batch_size = 2
sequence_length = inputs["input_ids"].shape[-1] // 2
input_ids = inputs["input_ids"][:max_batch_size, :sequence_length]
attention_mask = jnp.ones_like(input_ids)
attention_mask = attention_mask[:max_batch_size, :sequence_length]
# generate max 5 tokens
max_length = input_ids.shape[-1] + 5
if config.eos_token_id is not None and config.pad_token_id is None:
# hack to allow generate for models such as GPT2 as is done in `generate()`
config.pad_token_id = config.eos_token_id
return config, input_ids, attention_mask, max_length
@is_pt_flax_cross_test
def __snake_case ( self : Optional[Any]):
a , a , a , a : Optional[int] = self._get_input_ids_and_config()
a : Union[str, Any] = False
a : str = max_length
a : Dict = 0
for model_class in self.all_generative_model_classes:
a : Union[str, Any] = model_class(__UpperCAmelCase)
a : Optional[Any] = model_class.__name__[4:] # Skip the "Flax" at the beginning
a : Optional[Any] = getattr(__UpperCAmelCase , __UpperCAmelCase)
a : int = pt_model_class(__UpperCAmelCase).eval()
a : str = load_flax_weights_in_pytorch_model(__UpperCAmelCase , flax_model.params)
a : Tuple = flax_model.generate(__UpperCAmelCase).sequences
a : Tuple = pt_model.generate(torch.tensor(__UpperCAmelCase , dtype=torch.long))
if flax_generation_outputs.shape[-1] > pt_generation_outputs.shape[-1]:
a : Tuple = flax_generation_outputs[:, : pt_generation_outputs.shape[-1]]
self.assertListEqual(pt_generation_outputs.numpy().tolist() , flax_generation_outputs.tolist())
def __snake_case ( self : Dict):
a , a , a , a : Dict = self._get_input_ids_and_config()
a : Optional[Any] = False
a : Dict = max_length
for model_class in self.all_generative_model_classes:
a : Optional[int] = model_class(__UpperCAmelCase)
a : List[str] = model.generate(__UpperCAmelCase).sequences
self.assertEqual(generation_outputs.shape[-1] , __UpperCAmelCase)
a : str = jit(model.generate)
a : List[str] = jit_generate(__UpperCAmelCase).sequences
self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist())
def __snake_case ( self : Optional[int]):
a , a , a , a : Dict = self._get_input_ids_and_config()
a : Union[str, Any] = True
a : List[Any] = max_length
for model_class in self.all_generative_model_classes:
a : Dict = model_class(__UpperCAmelCase)
a : str = model.generate(__UpperCAmelCase).sequences
self.assertEqual(generation_outputs.shape[-1] , __UpperCAmelCase)
a : Optional[int] = jit(model.generate)
a : Tuple = jit_generate(__UpperCAmelCase).sequences
self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist())
def __snake_case ( self : Union[str, Any]):
a , a , a , a : List[Any] = self._get_input_ids_and_config()
a : List[str] = False
a : str = max_length
a : List[Any] = 2
for model_class in self.all_generative_model_classes:
a : Tuple = model_class(__UpperCAmelCase)
a : Union[str, Any] = model.generate(__UpperCAmelCase).sequences
self.assertEqual(generation_outputs.shape[-1] , __UpperCAmelCase)
a : List[Any] = jit(model.generate)
a : Optional[Any] = jit_generate(__UpperCAmelCase).sequences
self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist())
def __snake_case ( self : int):
a , a , a , a : Any = self._get_input_ids_and_config()
a : int = False
a : str = max_length
a : str = 2
a : Optional[int] = 2
for model_class in self.all_generative_model_classes:
a : str = model_class(__UpperCAmelCase)
a : str = model.generate(__UpperCAmelCase).sequences
self.assertEqual(generation_outputs.shape[0] , input_ids.shape[0] * config.num_return_sequences)
def __snake_case ( self : List[Any]):
a , a , a , a : Optional[int] = self._get_input_ids_and_config()
a : Any = True
a : List[Any] = max_length
a : Tuple = 0.8
a : int = 10
a : Union[str, Any] = 0.3
a : Any = 1
a : Optional[int] = 8
a : Any = 9
for model_class in self.all_generative_model_classes:
a : int = model_class(__UpperCAmelCase)
a : Any = model.generate(__UpperCAmelCase).sequences
self.assertEqual(generation_outputs.shape[-1] , __UpperCAmelCase)
a : Optional[Any] = jit(model.generate)
a : Any = jit_generate(__UpperCAmelCase).sequences
self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist())
def __snake_case ( self : Optional[int]):
a , a , a , a : str = self._get_input_ids_and_config()
a : Tuple = max_length
a : int = 1
a : int = 8
a : List[Any] = 9
for model_class in self.all_generative_model_classes:
a : List[Any] = model_class(__UpperCAmelCase)
a : Optional[Any] = model.generate(__UpperCAmelCase).sequences
self.assertEqual(generation_outputs.shape[-1] , __UpperCAmelCase)
a : Any = jit(model.generate)
a : List[str] = jit_generate(__UpperCAmelCase).sequences
self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist())
def __snake_case ( self : Dict):
a , a , a , a : int = self._get_input_ids_and_config()
a : Optional[Any] = max_length
a : Dict = 2
a : Tuple = 1
a : Optional[Any] = 8
a : Dict = 9
for model_class in self.all_generative_model_classes:
a : Any = model_class(__UpperCAmelCase)
a : List[str] = model.generate(__UpperCAmelCase).sequences
self.assertEqual(generation_outputs.shape[-1] , __UpperCAmelCase)
a : Dict = jit(model.generate)
a : Tuple = jit_generate(__UpperCAmelCase).sequences
self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist())
def __snake_case ( self : Optional[int]):
a , a , a , a : List[Any] = self._get_input_ids_and_config()
# pad attention mask on the left
a : Optional[int] = attention_mask.at[(0, 0)].set(0)
a : List[str] = False
a : str = max_length
for model_class in self.all_generative_model_classes:
a : Any = model_class(__UpperCAmelCase)
a : Any = model.generate(__UpperCAmelCase , attention_mask=__UpperCAmelCase).sequences
self.assertEqual(generation_outputs.shape[-1] , __UpperCAmelCase)
a : List[Any] = jit(model.generate)
a : Tuple = jit_generate(__UpperCAmelCase , attention_mask=__UpperCAmelCase).sequences
self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist())
def __snake_case ( self : List[Any]):
a , a , a , a : List[Any] = self._get_input_ids_and_config()
# pad attention mask on the left
a : Tuple = attention_mask.at[(0, 0)].set(0)
a : Any = True
a : Union[str, Any] = max_length
for model_class in self.all_generative_model_classes:
a : Dict = model_class(__UpperCAmelCase)
a : List[Any] = model.generate(__UpperCAmelCase , attention_mask=__UpperCAmelCase).sequences
self.assertEqual(generation_outputs.shape[-1] , __UpperCAmelCase)
a : Dict = jit(model.generate)
a : Dict = jit_generate(__UpperCAmelCase , attention_mask=__UpperCAmelCase).sequences
self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist())
def __snake_case ( self : Dict):
a , a , a , a : Any = self._get_input_ids_and_config()
# pad attention mask on the left
a : Dict = attention_mask.at[(0, 0)].set(0)
a : str = 2
a : Optional[int] = max_length
for model_class in self.all_generative_model_classes:
a : List[str] = model_class(__UpperCAmelCase)
a : int = model.generate(__UpperCAmelCase , attention_mask=__UpperCAmelCase).sequences
self.assertEqual(generation_outputs.shape[-1] , __UpperCAmelCase)
a : str = jit(model.generate)
a : Tuple = jit_generate(__UpperCAmelCase , attention_mask=__UpperCAmelCase).sequences
self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist())
@require_flax
class _A ( unittest.TestCase ):
"""simple docstring"""
def __snake_case ( self : Any):
a : List[Any] = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-bert")
a : int = FlaxAutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-bert-flax-only")
a : Union[str, Any] = "Hello world"
a : str = tokenizer(__UpperCAmelCase , return_tensors="np").input_ids
# typos are quickly detected (the correct argument is `do_sample`)
with self.assertRaisesRegex(__UpperCAmelCase , "do_samples"):
model.generate(__UpperCAmelCase , do_samples=__UpperCAmelCase)
# arbitrary arguments that will not be used anywhere are also not accepted
with self.assertRaisesRegex(__UpperCAmelCase , "foo"):
a : Tuple = {"foo": "bar"}
model.generate(__UpperCAmelCase , **__UpperCAmelCase)
| 40 |
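Most tests above assert that jax.jit compilation does not change the output of generate. The same eager-versus-jit equivalence check, reduced to a toy deterministic function (requires jax to be installed):

import jax
import jax.numpy as jnp

def step(x):
    # any deterministic token-like computation stands in for `generate`
    return jnp.cumsum(x, axis=-1) % 7

x = jnp.arange(12).reshape(2, 6)
eager = step(x)
jitted = jax.jit(step)(x)
assert eager.tolist() == jitted.tolist()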
"""simple docstring"""
import os
import tempfile
import unittest
import uuid
from pathlib import Path
from transformers.testing_utils import get_tests_dir, require_soundfile, require_torch, require_vision
from transformers.tools.agent_types import AgentAudio, AgentImage, AgentText
from transformers.utils import is_soundfile_availble, is_torch_available, is_vision_available
if is_torch_available():
import torch
if is_soundfile_availble():
import soundfile as sf
if is_vision_available():
from PIL import Image
def get_new_path( suffix="" ):
directory = tempfile.mkdtemp()
return os.path.join(directory , str(uuid.uuid4() ) + suffix )
@require_soundfile
@require_torch
class AgentAudioTests ( unittest.TestCase):
def test_from_tensor( self : str ):
tensor = torch.rand(12 , dtype=torch.float64 ) - 0.5
agent_type = AgentAudio(tensor )
path = str(agent_type.to_string() )
# Ensure that the tensor and the agent_type's tensor are the same
self.assertTrue(torch.allclose(tensor , agent_type.to_raw() , atol=1E-4 ) )
del agent_type
# Ensure the path remains even after the object deletion
self.assertTrue(os.path.exists(path ) )
# Ensure that the file contains the same value as the original tensor
new_tensor, _ = sf.read(path )
self.assertTrue(torch.allclose(tensor , torch.tensor(new_tensor ) , atol=1E-4 ) )
def test_from_string( self : Optional[int] ):
tensor = torch.rand(12 , dtype=torch.float64 ) - 0.5
path = get_new_path(suffix='''.wav''' )
sf.write(path , tensor , 16000 )
agent_type = AgentAudio(path )
self.assertTrue(torch.allclose(tensor , agent_type.to_raw() , atol=1E-4 ) )
self.assertEqual(agent_type.to_string() , path )
@require_vision
@require_torch
class AgentImageTests ( unittest.TestCase):
def test_from_tensor( self : Tuple ):
tensor = torch.randint(0 , 256 , (64, 64, 3) )
agent_type = AgentImage(tensor )
path = str(agent_type.to_string() )
# Ensure that the tensor and the agent_type's tensor are the same
self.assertTrue(torch.allclose(tensor , agent_type._tensor , atol=1E-4 ) )
self.assertIsInstance(agent_type.to_raw() , Image.Image )
# Ensure the path remains even after the object deletion
del agent_type
self.assertTrue(os.path.exists(path ) )
def test_from_path( self : str ):
path = Path(get_tests_dir('''fixtures/tests_samples/COCO''' ) ) / '''000000039769.png'''
image = Image.open(path )
agent_type = AgentImage(path )
self.assertTrue(path.samefile(agent_type.to_string() ) )
self.assertTrue(image == agent_type.to_raw() )
# Ensure the path remains even after the object deletion
del agent_type
self.assertTrue(os.path.exists(path ) )
def test_from_image( self : str ):
path = Path(get_tests_dir('''fixtures/tests_samples/COCO''' ) ) / '''000000039769.png'''
image = Image.open(path )
agent_type = AgentImage(image )
self.assertFalse(path.samefile(agent_type.to_string() ) )
self.assertTrue(image == agent_type.to_raw() )
# Ensure the path remains even after the object deletion
del agent_type
self.assertTrue(os.path.exists(path ) )
class AgentTextTests ( unittest.TestCase):
def test_from_string( self : Any ):
string = '''Hey!'''
agent_type = AgentText(string )
self.assertEqual(string , agent_type.to_string() )
self.assertEqual(string , agent_type.to_raw() )
self.assertEqual(string , agent_type )
| 264 | 0 |
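The AgentAudio assertions hinge on a wav round trip staying within a small tolerance: 16-bit PCM quantization error is about 1.5e-5 per sample, well under the 1e-4 atol used above. A standalone version of that check (requires soundfile):

import os
import tempfile
import numpy as np
import soundfile as sf

audio = (np.random.rand(16000) - 0.5).astype(np.float64)
path = os.path.join(tempfile.mkdtemp(), "sample.wav")
sf.write(path, audio, 16000)          # written as 16-bit PCM by default
loaded, rate = sf.read(path)          # read back as float64 in [-1, 1]
assert rate == 16000
assert np.allclose(audio, loaded, atol=1e-4)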
'''simple docstring'''
import heapq as hq
import math
from collections.abc import Iterator
class Vertex:
def __init__( self , id_ ):
self.id = str(id_ )
self.key = None
self.pi = None
self.neighbors = []
self.edges = {} # {vertex:distance}
def __lt__( self , other ):
return self.key < other.key
def __repr__( self ):
return self.id
def add_neighbor( self , vertex ):
self.neighbors.append(vertex )
def add_edge( self , vertex , weight ):
self.edges[vertex.id] = weight
def connect(graph , a , b , edge ) -> None:
# add the neighbors:
graph[a - 1].add_neighbor(graph[b - 1] )
graph[b - 1].add_neighbor(graph[a - 1] )
# add the edges:
graph[a - 1].add_edge(graph[b - 1] , edge )
graph[b - 1].add_edge(graph[a - 1] , edge )
def prim(graph , root ) -> list:
a = []
for u in graph:
u.key = math.inf
u.pi = None
root.key = 0
q = graph[:]
while q:
u = min(q )
q.remove(u )
for v in u.neighbors:
if (v in q) and (u.edges[v.id] < v.key):
v.pi = u
v.key = u.edges[v.id]
for i in range(1 , len(graph ) ):
a.append((int(graph[i].id ) + 1, int(graph[i].pi.id ) + 1) )
return a
def prim_heap(graph , root ) -> Iterator[tuple]:
for u in graph:
u.key = math.inf
u.pi = None
root.key = 0
h = list(graph )
hq.heapify(h )
while h:
u = hq.heappop(h )
for v in u.neighbors:
if (v in h) and (u.edges[v.id] < v.key):
v.pi = u
v.key = u.edges[v.id]
hq.heapify(h )
for i in range(1 , len(graph ) ):
yield (int(graph[i].id ) + 1, int(graph[i].pi.id ) + 1)
def test_vector() -> None:
pass
if __name__ == "__main__":
import doctest
doctest.testmod()
| 41 |
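A small usage sketch for the Prim implementations above, assuming the Vertex/connect/prim names as restored: it builds a 5-vertex weighted graph and prints the minimum spanning tree as 1-indexed (child, parent) pairs. The list-based prim runs in O(V^2); prim_heap trades that for heap maintenance.

edges = [(1, 2, 4), (1, 3, 8), (2, 3, 11), (2, 4, 8), (3, 5, 7), (4, 5, 2)]
graph = [Vertex(i) for i in range(5)]
for a, b, w in edges:
    connect(graph, a, b, w)
print(sorted(prim(graph, graph[0])))  # MST edges, e.g. [(2, 1), (3, 1), (4, 5), (5, 3)]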
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
'''configuration_x_clip''': [
'''XCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''XCLIPConfig''',
'''XCLIPTextConfig''',
'''XCLIPVisionConfig''',
],
'''processing_x_clip''': ['''XCLIPProcessor'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure['''modeling_x_clip'''] = [
'''XCLIP_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''XCLIPModel''',
'''XCLIPPreTrainedModel''',
'''XCLIPTextModel''',
'''XCLIPVisionModel''',
]
if TYPE_CHECKING:
from .configuration_x_clip import (
XCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
XCLIPConfig,
XCLIPTextConfig,
XCLIPVisionConfig,
)
from .processing_x_clip import XCLIPProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_x_clip import (
XCLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
XCLIPModel,
XCLIPPreTrainedModel,
XCLIPTextModel,
XCLIPVisionModel,
)
else:
import sys
sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 264 | 0 |
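The __init__ above defers heavy imports through transformers' _LazyModule: submodules are only imported when one of their attributes is first accessed. A minimal sketch of the underlying idea with plain importlib; the package layout in the comment is hypothetical.

import importlib

class LazyModule:
    def __init__(self, package_name, import_structure):
        self._package = package_name
        # map each public attribute to the submodule that defines it
        self._attr_to_module = {
            attr: module for module, attrs in import_structure.items() for attr in attrs
        }

    def __getattr__(self, attr):
        if attr not in self._attr_to_module:
            raise AttributeError(attr)
        # import the owning submodule only on first access
        module = importlib.import_module("." + self._attr_to_module[attr], self._package)
        return getattr(module, attr)

# e.g. LazyModule("mypkg", {"modeling": ["MyModel"], "configuration": ["MyConfig"]})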
'''simple docstring'''
import warnings
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class ChineseCLIPProcessor ( ProcessorMixin ):
attributes = ["""image_processor""", """tokenizer"""]
image_processor_class = """ChineseCLIPImageProcessor"""
tokenizer_class = ("""BertTokenizer""", """BertTokenizerFast""")
def __init__( self , image_processor=None , tokenizer=None , **kwargs ):
"""simple docstring"""
feature_extractor = None
if "feature_extractor" in kwargs:
warnings.warn(
'The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`'
' instead.' , FutureWarning , )
feature_extractor = kwargs.pop('feature_extractor' )
image_processor = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError('You need to specify an `image_processor`.' )
if tokenizer is None:
raise ValueError('You need to specify a `tokenizer`.' )
super().__init__(image_processor , tokenizer )
self.current_processor = self.image_processor
def __call__( self , text=None , images=None , return_tensors=None , **kwargs ):
"""simple docstring"""
if text is None and images is None:
raise ValueError('You have to specify either text or images. Both cannot be none.' )
if text is not None:
encoding = self.tokenizer(text , return_tensors=return_tensors , **kwargs )
if images is not None:
image_features = self.image_processor(images , return_tensors=return_tensors , **kwargs )
if text is not None and images is not None:
encoding['pixel_values'] = image_features.pixel_values
return encoding
elif text is not None:
return encoding
else:
return BatchEncoding(data=dict(**image_features ) , tensor_type=return_tensors )
def batch_decode( self , *args , **kwargs ):
"""simple docstring"""
return self.tokenizer.batch_decode(*args , **kwargs )
def decode( self , *args , **kwargs ):
"""simple docstring"""
return self.tokenizer.decode(*args , **kwargs )
@property
def model_input_names( self ):
"""simple docstring"""
tokenizer_input_names = self.tokenizer.model_input_names
image_processor_input_names = self.image_processor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
@property
def feature_extractor_class( self ):
"""simple docstring"""
warnings.warn(
'`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.' , FutureWarning , )
return self.image_processor_class
| 42 |
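The processor's __call__ above merges the tokenizer output with the image-processor output under a pixel_values key. The core merge, sketched on plain dicts:

def merge_features(text_features: dict, image_features: dict) -> dict:
    # text encodings carry input_ids/attention_mask; images contribute pixel_values
    encoding = dict(text_features)
    if image_features:
        encoding["pixel_values"] = image_features["pixel_values"]
    return encoding

print(merge_features({"input_ids": [[101, 102]]}, {"pixel_values": [[0.0]]}))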
"""simple docstring"""
import pickle
import shutil
import tempfile
import unittest
from transformers import SPIECE_UNDERLINE, XLMRobertaTokenizer, XLMRobertaTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir('''fixtures/test_sentencepiece.model''')
@require_sentencepiece
@require_tokenizers
class XLMRobertaTokenizationTest ( TokenizerTesterMixin , unittest.TestCase):
tokenizer_class = XLMRobertaTokenizer
rust_tokenizer_class = XLMRobertaTokenizerFast
test_rust_tokenizer = True
test_sentencepiece = True
def setUp( self : List[Any] ):
super().setUp()
# We have a SentencePiece fixture for testing
tokenizer = XLMRobertaTokenizer(SAMPLE_VOCAB , keep_accents=True )
tokenizer.save_pretrained(self.tmpdirname )
def test_convert_token_and_id( self : str ):
token = '''<pad>'''
token_id = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(token ) , token_id )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id ) , token )
def _snake_case ( self : Union[str, Any] ):
snake_case_ : Dict = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , '''<s>''' )
self.assertEqual(vocab_keys[1] , '''<pad>''' )
self.assertEqual(vocab_keys[-1] , '''<mask>''' )
self.assertEqual(len(lowercase_ ) , 1002 )
def _snake_case ( self : Union[str, Any] ):
self.assertEqual(self.get_tokenizer().vocab_size , 1002 )
def _snake_case ( self : Dict ):
snake_case_ : Optional[Any] = XLMRobertaTokenizer(lowercase_ , keep_accents=lowercase_ )
snake_case_ : Dict = tokenizer.tokenize('''This is a test''' )
self.assertListEqual(lowercase_ , ['''▁This''', '''▁is''', '''▁a''', '''▁t''', '''est'''] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(lowercase_ ) , [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]] , )
snake_case_ : Dict = tokenizer.tokenize('''I was born in 92000, and this is falsé.''' )
self.assertListEqual(
lowercase_ , [
SPIECE_UNDERLINE + '''I''',
SPIECE_UNDERLINE + '''was''',
SPIECE_UNDERLINE + '''b''',
'''or''',
'''n''',
SPIECE_UNDERLINE + '''in''',
SPIECE_UNDERLINE + '''''',
'''9''',
'''2''',
'''0''',
'''0''',
'''0''',
''',''',
SPIECE_UNDERLINE + '''and''',
SPIECE_UNDERLINE + '''this''',
SPIECE_UNDERLINE + '''is''',
SPIECE_UNDERLINE + '''f''',
'''al''',
'''s''',
'''é''',
'''.''',
] , )
snake_case_ : List[Any] = tokenizer.convert_tokens_to_ids(lowercase_ )
self.assertListEqual(
lowercase_ , [
value + tokenizer.fairseq_offset
for value in [8, 21, 84, 55, 24, 19, 7, 2, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 2, 4]
# ^ unk: 2 + 1 = 3 unk: 2 + 1 = 3 ^
] , )
snake_case_ : List[str] = tokenizer.convert_ids_to_tokens(lowercase_ )
self.assertListEqual(
lowercase_ , [
SPIECE_UNDERLINE + '''I''',
SPIECE_UNDERLINE + '''was''',
SPIECE_UNDERLINE + '''b''',
'''or''',
'''n''',
SPIECE_UNDERLINE + '''in''',
SPIECE_UNDERLINE + '''''',
'''<unk>''',
'''2''',
'''0''',
'''0''',
'''0''',
''',''',
SPIECE_UNDERLINE + '''and''',
SPIECE_UNDERLINE + '''this''',
SPIECE_UNDERLINE + '''is''',
SPIECE_UNDERLINE + '''f''',
'''al''',
'''s''',
'''<unk>''',
'''.''',
] , )
def _snake_case ( self : List[str] ):
if not self.test_slow_tokenizer:
# as we don't have a slow version, we can't compare the outputs between slow and fast versions
return
snake_case_ : int = (self.rust_tokenizer_class, '''hf-internal-testing/tiny-xlm-roberta''', {})
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})" ):
snake_case_ : Union[str, Any] = self.rust_tokenizer_class.from_pretrained(lowercase_ , **lowercase_ )
snake_case_ : int = self.tokenizer_class.from_pretrained(lowercase_ , **lowercase_ )
snake_case_ : Optional[Any] = tempfile.mkdtemp()
snake_case_ : Tuple = tokenizer_r.save_pretrained(lowercase_ )
snake_case_ : List[str] = tokenizer_p.save_pretrained(lowercase_ )
# Checks it save with the same files + the tokenizer.json file for the fast one
self.assertTrue(any('''tokenizer.json''' in f for f in tokenizer_r_files ) )
snake_case_ : str = tuple(f for f in tokenizer_r_files if '''tokenizer.json''' not in f )
self.assertSequenceEqual(lowercase_ , lowercase_ )
# Checks everything loads correctly in the same way
snake_case_ : Union[str, Any] = tokenizer_r.from_pretrained(lowercase_ )
snake_case_ : List[Any] = tokenizer_p.from_pretrained(lowercase_ )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(lowercase_ , lowercase_ ) )
# self.assertEqual(getattr(tokenizer_rp, key), getattr(tokenizer_pp, key))
# self.assertEqual(getattr(tokenizer_rp, key + "_id"), getattr(tokenizer_pp, key + "_id"))
shutil.rmtree(lowercase_ )
# Save tokenizer rust, legacy_format=True
snake_case_ : Optional[Any] = tempfile.mkdtemp()
snake_case_ : List[str] = tokenizer_r.save_pretrained(lowercase_ , legacy_format=lowercase_ )
snake_case_ : List[str] = tokenizer_p.save_pretrained(lowercase_ )
# Checks it save with the same files
self.assertSequenceEqual(lowercase_ , lowercase_ )
# Checks everything loads correctly in the same way
snake_case_ : List[Any] = tokenizer_r.from_pretrained(lowercase_ )
snake_case_ : List[str] = tokenizer_p.from_pretrained(lowercase_ )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(lowercase_ , lowercase_ ) )
shutil.rmtree(lowercase_ )
# Save tokenizer rust, legacy_format=False
snake_case_ : Optional[Any] = tempfile.mkdtemp()
snake_case_ : List[Any] = tokenizer_r.save_pretrained(lowercase_ , legacy_format=lowercase_ )
snake_case_ : Tuple = tokenizer_p.save_pretrained(lowercase_ )
# Checks it saved the tokenizer.json file
self.assertTrue(any('''tokenizer.json''' in f for f in tokenizer_r_files ) )
# Checks everything loads correctly in the same way
snake_case_ : Optional[Any] = tokenizer_r.from_pretrained(lowercase_ )
snake_case_ : Dict = tokenizer_p.from_pretrained(lowercase_ )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(lowercase_ , lowercase_ ) )
shutil.rmtree(lowercase_ )
@cached_property
def _snake_case ( self : List[str] ):
return XLMRobertaTokenizer.from_pretrained('''xlm-roberta-base''' )
def _snake_case ( self : Optional[Any] ):
with tempfile.NamedTemporaryFile() as f:
shutil.copyfile(lowercase_ , f.name )
snake_case_ : Any = XLMRobertaTokenizer(f.name , keep_accents=lowercase_ )
snake_case_ : List[Any] = pickle.dumps(lowercase_ )
pickle.loads(lowercase_ )
def _snake_case ( self : Tuple ):
if not self.test_rust_tokenizer:
return
snake_case_ : List[str] = self.get_tokenizer()
snake_case_ : Optional[int] = self.get_rust_tokenizer()
snake_case_ : Dict = '''I was born in 92000, and this is falsé.'''
snake_case_ : Optional[int] = tokenizer.tokenize(lowercase_ )
snake_case_ : Tuple = rust_tokenizer.tokenize(lowercase_ )
self.assertListEqual(lowercase_ , lowercase_ )
snake_case_ : List[str] = tokenizer.encode(lowercase_ , add_special_tokens=lowercase_ )
snake_case_ : str = rust_tokenizer.encode(lowercase_ , add_special_tokens=lowercase_ )
self.assertListEqual(lowercase_ , lowercase_ )
snake_case_ : int = self.get_rust_tokenizer()
snake_case_ : Any = tokenizer.encode(lowercase_ )
snake_case_ : int = rust_tokenizer.encode(lowercase_ )
self.assertListEqual(lowercase_ , lowercase_ )
@slow
def _snake_case ( self : Tuple ):
snake_case_ : int = '''Hello World!'''
snake_case_ : int = [0, 35378, 6661, 38, 2]
# xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.base') # xlmr.large has same tokenizer
# xlmr.eval()
# xlmr.encode(symbols)
self.assertListEqual(lowercase_ , self.big_tokenizer.encode(lowercase_ ) )
@slow
def _snake_case ( self : List[Any] ):
snake_case_ : Any = (
'''This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) " [ ] ! : - . Also we will'''
''' add words that should not exsist and be tokenized to <unk>, such as saoneuhaoesuth'''
)
snake_case_ : Optional[int] = [
0,
3293,
83,
10,
4552,
4989,
7986,
678,
10,
5915,
111,
179459,
124850,
4,
6044,
237,
12,
6,
5,
6,
4,
6780,
705,
15,
1388,
44,
378,
10114,
711,
152,
20,
6,
5,
22376,
642,
1221,
15190,
34153,
450,
5608,
959,
1119,
57702,
136,
186,
47,
1098,
29367,
47,
# 4426, # What fairseq tokenizes from "<unk>": "_<"
# 3678, # What fairseq tokenizes from "<unk>": "unk"
# 2740, # What fairseq tokenizes from "<unk>": ">"
3, # What we tokenize from "<unk>": "<unk>"
6, # Residue from the tokenization: an extra sentencepiece underline
4,
6044,
237,
6284,
50901,
528,
31,
90,
34,
927,
2,
]
# xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.base') # xlmr.large has same tokenizer
# xlmr.eval()
# xlmr.encode(symbols)
self.assertListEqual(lowercase_ , self.big_tokenizer.encode(lowercase_ ) )
@slow
def _snake_case ( self : Dict ):
# fmt: off
snake_case_ : int = {'''input_ids''': [[0, 11062, 82772, 7, 15, 82772, 538, 51529, 237, 17198, 1290, 206, 9, 215175, 1314, 136, 17198, 1290, 206, 9, 56359, 42, 122009, 9, 16466, 16, 87344, 4537, 9, 4717, 78381, 6, 159958, 7, 15, 24480, 618, 4, 527, 22693, 5428, 4, 2777, 24480, 9874, 4, 43523, 594, 4, 803, 18392, 33189, 18, 4, 43523, 24447, 12399, 100, 24955, 83658, 9626, 144057, 15, 839, 22335, 16, 136, 24955, 83658, 83479, 15, 39102, 724, 16, 678, 645, 2789, 1328, 4589, 42, 122009, 115774, 23, 805, 1328, 46876, 7, 136, 53894, 1940, 42227, 41159, 17721, 823, 425, 4, 27512, 98722, 206, 136, 5531, 4970, 919, 17336, 5, 2], [0, 20080, 618, 83, 82775, 47, 479, 9, 1517, 73, 53894, 333, 80581, 110117, 18811, 5256, 1295, 51, 152526, 297, 7986, 390, 124416, 538, 35431, 214, 98, 15044, 25737, 136, 7108, 43701, 23, 756, 135355, 7, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 581, 63773, 119455, 6, 147797, 88203, 7, 645, 70, 21, 3285, 10269, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]], '''attention_mask''': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=lowercase_ , model_name='''xlm-roberta-base''' , revision='''d9d8a8ea5eb94b1c6654ae9249df7793cd2933d3''' , )
| 264 | 0 |
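The save/reload checks above all reduce to one rule: persisting a tokenizer must not change its encodings. The same assertion against a pretrained checkpoint; this requires downloading xlm-roberta-base, and any other tokenizer would work the same way.

import tempfile
from transformers import AutoTokenizer

tok = AutoTokenizer.from_pretrained("xlm-roberta-base")
text = "I was born in 92000, and this is falsé."
with tempfile.TemporaryDirectory() as tmpdirname:
    tok.save_pretrained(tmpdirname)
    reloaded = AutoTokenizer.from_pretrained(tmpdirname)
assert tok(text)["input_ids"] == reloaded(text)["input_ids"]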
class RadixNode:
'''simple docstring'''
def __init__( self , prefix = "" , is_leaf = False) -> None:
# Mapping from the first character of the prefix of the node
self.nodes: dict[str, RadixNode] = {}
# A node will be a leaf if the tree contains its word
self.is_leaf = is_leaf
self.prefix = prefix
def match( self , word) -> tuple[str, str, str]:
x = 0
for q, w in zip(self.prefix , word):
if q != w:
break
x += 1
return self.prefix[:x], self.prefix[x:], word[x:]
def insert_many( self , words) -> None:
for word in words:
self.insert(word)
def insert( self , word) -> None:
# Case 1: If the word is the prefix of the node
# Solution: We set the current node as leaf
if self.prefix == word:
self.is_leaf = True
# Case 2: The node has no edges that have a prefix to the word
# Solution: We create an edge from the current node to a new one
# containing the word
elif word[0] not in self.nodes:
self.nodes[word[0]] = RadixNode(prefix=word , is_leaf=True)
else:
incoming_node = self.nodes[word[0]]
matching_string , remaining_prefix , remaining_word = incoming_node.match(
word)
# Case 3: The node prefix is equal to the matching
# Solution: We insert remaining word on the next node
if remaining_prefix == "":
self.nodes[matching_string[0]].insert(remaining_word)
# Case 4: The word is greater equal to the matching
# Solution: Create a node in between both nodes, change
# prefixes and add the new node for the remaining word
else:
incoming_node.prefix = remaining_prefix
aux_node = self.nodes[matching_string[0]]
self.nodes[matching_string[0]] = RadixNode(matching_string , False)
self.nodes[matching_string[0]].nodes[remaining_prefix[0]] = aux_node
if remaining_word == "":
self.nodes[matching_string[0]].is_leaf = True
else:
self.nodes[matching_string[0]].insert(remaining_word)
def find( self , word) -> bool:
incoming_node = self.nodes.get(word[0] , None)
if not incoming_node:
return False
else:
matching_string , remaining_prefix , remaining_word = incoming_node.match(
word)
# If there is remaining prefix, the word can't be on the tree
if remaining_prefix != "":
return False
# This applies when the word and the prefix are equal
elif remaining_word == "":
return incoming_node.is_leaf
# We have word remaining so we check the next node
else:
return incoming_node.find(remaining_word)
def delete( self , word) -> bool:
incoming_node = self.nodes.get(word[0] , None)
if not incoming_node:
return False
else:
matching_string , remaining_prefix , remaining_word = incoming_node.match(
word)
# If there is remaining prefix, the word can't be on the tree
if remaining_prefix != "":
return False
# We have word remaining so we check the next node
elif remaining_word != "":
return incoming_node.delete(remaining_word)
else:
# If it is not a leaf, we don't have to delete
if not incoming_node.is_leaf:
return False
else:
# We delete the nodes if no edges go from it
if len(incoming_node.nodes) == 0:
del self.nodes[word[0]]
# We merge the current node with its only child
if len(self.nodes) == 1 and not self.is_leaf:
merging_node = list(self.nodes.values())[0]
self.is_leaf = merging_node.is_leaf
self.prefix += merging_node.prefix
self.nodes = merging_node.nodes
# If there is more than 1 edge, we just mark it as non-leaf
elif len(incoming_node.nodes) > 1:
incoming_node.is_leaf = False
# If there is 1 edge, we merge it with its child
else:
merging_node = list(incoming_node.nodes.values())[0]
incoming_node.is_leaf = merging_node.is_leaf
incoming_node.prefix += merging_node.prefix
incoming_node.nodes = merging_node.nodes
return True
def print_tree( self , height = 0) -> None:
if self.prefix != "":
print('''-''' * height , self.prefix , ''' (leaf)''' if self.is_leaf else '''''')
for value in self.nodes.values():
value.print_tree(height + 1)
def test_trie( ):
'''simple docstring'''
words = '''banana bananas bandana band apple all beast'''.split()
root = RadixNode()
root.insert_many(words )
assert all(root.find(word ) for word in words )
assert not root.find('''bandanas''' )
assert not root.find('''apps''' )
root.delete('''all''' )
assert not root.find('''all''' )
root.delete('''banana''' )
assert not root.find('''banana''' )
assert root.find('''bananas''' )
return True
def pytests( ):
'''simple docstring'''
assert test_trie()
def main( ):
'''simple docstring'''
root = RadixNode()
words = '''banana bananas bandanas bandana band apple all beast'''.split()
root.insert_many(words )
print('''Words:''' , words )
print('''Tree:''' )
root.print_tree()
if __name__ == "__main__":
main()
| 43 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
GPT_NEOX_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'''EleutherAI/gpt-neox-20b''': '''https://huggingface.co/EleutherAI/gpt-neox-20b/resolve/main/config.json''',
# See all GPTNeoX models at https://huggingface.co/models?filter=gpt_neox
}
class GPTNeoXConfig ( PretrainedConfig):
model_type = """gpt_neox"""
def __init__( self : List[str] , vocab_size=50432 , hidden_size=6144 , num_hidden_layers=44 , num_attention_heads=64 , intermediate_size=24576 , hidden_act="gelu" , rotary_pct=0.25 , rotary_emb_base=10000 , attention_dropout=0.0 , hidden_dropout=0.0 , classifier_dropout=0.1 , max_position_embeddings=2048 , initializer_range=0.02 , layer_norm_eps=1E-5 , use_cache=True , bos_token_id=0 , eos_token_id=2 , tie_word_embeddings=False , use_parallel_residual=True , rope_scaling=None , **kwargs , ):
super().__init__(bos_token_id=bos_token_id , eos_token_id=eos_token_id , **kwargs )
self.vocab_size = vocab_size
self.max_position_embeddings = max_position_embeddings
self.hidden_size = hidden_size
self.num_hidden_layers = num_hidden_layers
self.num_attention_heads = num_attention_heads
self.intermediate_size = intermediate_size
self.hidden_act = hidden_act
self.rotary_pct = rotary_pct
self.rotary_emb_base = rotary_emb_base
self.attention_dropout = attention_dropout
self.hidden_dropout = hidden_dropout
self.classifier_dropout = classifier_dropout
self.initializer_range = initializer_range
self.layer_norm_eps = layer_norm_eps
self.use_cache = use_cache
self.tie_word_embeddings = tie_word_embeddings
self.use_parallel_residual = use_parallel_residual
self.rope_scaling = rope_scaling
self._rope_scaling_validation()
if self.hidden_size % self.num_attention_heads != 0:
raise ValueError(
'''The hidden size is not divisble by the number of attention heads! Make sure to update them!''' )
def _rope_scaling_validation( self : Optional[int] ):
if self.rope_scaling is None:
return
if not isinstance(self.rope_scaling , dict ) or len(self.rope_scaling ) != 2:
raise ValueError(
'''`rope_scaling` must be a dictionary with two fields, `type` and `factor`, '''
f"got {self.rope_scaling}" )
rope_scaling_type = self.rope_scaling.get('''type''' , None )
rope_scaling_factor = self.rope_scaling.get('''factor''' , None )
if rope_scaling_type is None or rope_scaling_type not in ["linear", "dynamic"]:
raise ValueError(
f"`rope_scaling`'s name field must be one of ['linear', 'dynamic'], got {rope_scaling_type}" )
if rope_scaling_factor is None or not isinstance(rope_scaling_factor , float ) or rope_scaling_factor <= 1.0:
raise ValueError(f"`rope_scaling`'s factor field must be a float > 1, got {rope_scaling_factor}" )
| 264 | 0 |
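The _rope_scaling_validation above boils down to a standalone check. A sketch of it as a free function, useful for validating config dicts before instantiating a model:

def validate_rope_scaling(rope_scaling):
    if rope_scaling is None:
        return
    if not isinstance(rope_scaling, dict) or len(rope_scaling) != 2:
        raise ValueError(f"`rope_scaling` must be a dictionary with two fields, `type` and `factor`, got {rope_scaling}")
    scaling_type = rope_scaling.get("type")
    factor = rope_scaling.get("factor")
    if scaling_type not in ("linear", "dynamic"):
        raise ValueError(f"`rope_scaling`'s type field must be one of ['linear', 'dynamic'], got {scaling_type}")
    if not isinstance(factor, float) or factor <= 1.0:
        raise ValueError(f"`rope_scaling`'s factor field must be a float > 1, got {factor}")

validate_rope_scaling({"type": "linear", "factor": 2.0})  # passes silently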
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
'configuration_megatron_bert': ['MEGATRON_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'MegatronBertConfig'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure['modeling_megatron_bert'] = [
'MEGATRON_BERT_PRETRAINED_MODEL_ARCHIVE_LIST',
'MegatronBertForCausalLM',
'MegatronBertForMaskedLM',
'MegatronBertForMultipleChoice',
'MegatronBertForNextSentencePrediction',
'MegatronBertForPreTraining',
'MegatronBertForQuestionAnswering',
'MegatronBertForSequenceClassification',
'MegatronBertForTokenClassification',
'MegatronBertModel',
'MegatronBertPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_megatron_bert import MEGATRON_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, MegatronBertConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_megatron_bert import (
MEGATRON_BERT_PRETRAINED_MODEL_ARCHIVE_LIST,
MegatronBertForCausalLM,
MegatronBertForMaskedLM,
MegatronBertForMultipleChoice,
MegatronBertForNextSentencePrediction,
MegatronBertForPreTraining,
MegatronBertForQuestionAnswering,
MegatronBertForSequenceClassification,
MegatronBertForTokenClassification,
MegatronBertModel,
MegatronBertPreTrainedModel,
)
else:
import sys
sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 44 |
"""simple docstring"""
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_pegasus import PegasusTokenizer
else:
PegasusTokenizer = None
logger = logging.get_logger(__name__)
SPIECE_UNDERLINE = '''▁'''
VOCAB_FILES_NAMES = {'''vocab_file''': '''spiece.model''', '''tokenizer_file''': '''tokenizer.json'''}
PRETRAINED_VOCAB_FILES_MAP = {
'''vocab_file''': {'''google/pegasus-xsum''': '''https://huggingface.co/google/pegasus-xsum/resolve/main/spiece.model'''},
'''tokenizer_file''': {
'''google/pegasus-xsum''': '''https://huggingface.co/google/pegasus-xsum/resolve/main/tokenizer.json'''
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'''google/pegasus-xsum''': 5_12,
}
class PegasusTokenizerFast ( PreTrainedTokenizerFast):
vocab_files_names = VOCAB_FILES_NAMES
pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
slow_tokenizer_class = PegasusTokenizer
model_input_names = ["""input_ids""", """attention_mask"""]
def __init__( self : Any , vocab_file=None , tokenizer_file=None , pad_token="<pad>" , eos_token="</s>" , unk_token="<unk>" , mask_token="<mask_2>" , mask_token_sent="<mask_1>" , additional_special_tokens=None , offset=103 , **kwargs , ):
self.offset = offset
if additional_special_tokens is not None:
if not isinstance(additional_special_tokens , list ):
raise TypeError(
f"additional_special_tokens should be of type {type(list )}, but is"
f" {type(additional_special_tokens )}" )
additional_special_tokens_extended = (
([mask_token_sent] + additional_special_tokens)
if mask_token_sent not in additional_special_tokens and mask_token_sent is not None
else additional_special_tokens
)
# fill additional tokens with ..., <unk_token_102> in case not all additional tokens are already taken
additional_special_tokens_extended += [
f"<unk_{i}>" for i in range(len(lowercase_ ) , self.offset - 1 )
]
if len(set(lowercase_ ) ) != len(lowercase_ ):
raise ValueError(
'''Please make sure that the provided additional_special_tokens do not contain an incorrectly'''
f" shifted list of <unk_x> tokens. Found {additional_special_tokens_extended}." )
snake_case_ : Union[str, Any] = additional_special_tokens_extended
else:
snake_case_ : Dict = [mask_token_sent] if mask_token_sent is not None else []
additional_special_tokens += [f"<unk_{i}>" for i in range(2 , self.offset )]
super().__init__(
lowercase_ , tokenizer_file=lowercase_ , pad_token=lowercase_ , eos_token=lowercase_ , unk_token=lowercase_ , mask_token=lowercase_ , mask_token_sent=lowercase_ , offset=lowercase_ , additional_special_tokens=lowercase_ , **lowercase_ , )
snake_case_ : List[Any] = vocab_file
snake_case_ : List[Any] = False if not self.vocab_file else True
    def _special_token_mask(self, seq):
        all_special_ids = set(self.all_special_ids)  # call it once instead of inside list comp
        all_special_ids.remove(self.unk_token_id)  # <unk> is only sometimes special

        if all_special_ids != set(range(len(self.additional_special_tokens) + 3)):
            raise ValueError(
                "There should be 3 special tokens: mask_token, pad_token, and eos_token +"
                f" {len(self.additional_special_tokens)} additional_special_tokens, but got {all_special_ids}"
            )

        return [1 if x in all_special_ids else 0 for x in seq]
    def get_special_tokens_mask(self, token_ids_0, token_ids_1=None, already_has_special_tokens=False):
        """Get a list where entries are [1] if a token is special (eos/pad/mask) and 0 otherwise."""
        if already_has_special_tokens:
            return self._special_token_mask(token_ids_0)
        elif token_ids_1 is None:
            return self._special_token_mask(token_ids_0) + [1]
        else:
            return self._special_token_mask(token_ids_0 + token_ids_1) + [1]
    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        if token_ids_1 is None:
            return token_ids_0 + [self.eos_token_id]
        # We don't expect to process pairs, but leave the pair logic for API consistency
        return token_ids_0 + token_ids_1 + [self.eos_token_id]
    def save_vocabulary(self, save_directory, filename_prefix=None):
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                "Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
                "tokenizer."
            )
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)

        return (out_vocab_file,)
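A hedged usage sketch of the class above (checkpoint name taken from the URL map earlier in this file; requires the tokenizers backend and network access on first use):

from transformers import PegasusTokenizerFast

tok = PegasusTokenizerFast.from_pretrained("google/pegasus-xsum")
enc = tok("PEGASUS was pre-trained with gap sentences.")
# build_inputs_with_special_tokens appends </s>, so every encoded sequence ends with eos_token_id
assert enc["input_ids"][-1] == tok.eos_token_id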
| 264 | 0 |
"""simple docstring"""
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_base import BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import PaddingStrategy, logging
from .tokenization_realm import RealmTokenizer
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"google/realm-cc-news-pretrained-embedder": (
"https://huggingface.co/google/realm-cc-news-pretrained-embedder/resolve/main/vocab.txt"
),
"google/realm-cc-news-pretrained-encoder": (
"https://huggingface.co/google/realm-cc-news-pretrained-encoder/resolve/main/vocab.txt"
),
"google/realm-cc-news-pretrained-scorer": (
"https://huggingface.co/google/realm-cc-news-pretrained-scorer/resolve/main/vocab.txt"
),
"google/realm-cc-news-pretrained-openqa": (
"https://huggingface.co/google/realm-cc-news-pretrained-openqa/aresolve/main/vocab.txt"
),
"google/realm-orqa-nq-openqa": "https://huggingface.co/google/realm-orqa-nq-openqa/resolve/main/vocab.txt",
"google/realm-orqa-nq-reader": "https://huggingface.co/google/realm-orqa-nq-reader/resolve/main/vocab.txt",
"google/realm-orqa-wq-openqa": "https://huggingface.co/google/realm-orqa-wq-openqa/resolve/main/vocab.txt",
"google/realm-orqa-wq-reader": "https://huggingface.co/google/realm-orqa-wq-reader/resolve/main/vocab.txt",
},
"tokenizer_file": {
"google/realm-cc-news-pretrained-embedder": (
"https://huggingface.co/google/realm-cc-news-pretrained-embedder/resolve/main/tokenizer.jsont"
),
"google/realm-cc-news-pretrained-encoder": (
"https://huggingface.co/google/realm-cc-news-pretrained-encoder/resolve/main/tokenizer.json"
),
"google/realm-cc-news-pretrained-scorer": (
"https://huggingface.co/google/realm-cc-news-pretrained-scorer/resolve/main/tokenizer.json"
),
"google/realm-cc-news-pretrained-openqa": (
"https://huggingface.co/google/realm-cc-news-pretrained-openqa/aresolve/main/tokenizer.json"
),
"google/realm-orqa-nq-openqa": (
"https://huggingface.co/google/realm-orqa-nq-openqa/resolve/main/tokenizer.json"
),
"google/realm-orqa-nq-reader": (
"https://huggingface.co/google/realm-orqa-nq-reader/resolve/main/tokenizer.json"
),
"google/realm-orqa-wq-openqa": (
"https://huggingface.co/google/realm-orqa-wq-openqa/resolve/main/tokenizer.json"
),
"google/realm-orqa-wq-reader": (
"https://huggingface.co/google/realm-orqa-wq-reader/resolve/main/tokenizer.json"
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "google/realm-cc-news-pretrained-embedder": 512,
    "google/realm-cc-news-pretrained-encoder": 512,
    "google/realm-cc-news-pretrained-scorer": 512,
    "google/realm-cc-news-pretrained-openqa": 512,
    "google/realm-orqa-nq-openqa": 512,
    "google/realm-orqa-nq-reader": 512,
    "google/realm-orqa-wq-openqa": 512,
    "google/realm-orqa-wq-reader": 512,
}
PRETRAINED_INIT_CONFIGURATION = {
"google/realm-cc-news-pretrained-embedder": {"do_lower_case": True},
"google/realm-cc-news-pretrained-encoder": {"do_lower_case": True},
"google/realm-cc-news-pretrained-scorer": {"do_lower_case": True},
"google/realm-cc-news-pretrained-openqa": {"do_lower_case": True},
"google/realm-orqa-nq-openqa": {"do_lower_case": True},
"google/realm-orqa-nq-reader": {"do_lower_case": True},
"google/realm-orqa-wq-openqa": {"do_lower_case": True},
"google/realm-orqa-wq-reader": {"do_lower_case": True},
}
class RealmTokenizerFast(PreTrainedTokenizerFast):
    """Fast REALM tokenizer, backed by HuggingFace's tokenizers library."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = RealmTokenizer

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=True,
        unk_token="[UNK]",
        sep_token="[SEP]",
        pad_token="[PAD]",
        cls_token="[CLS]",
        mask_token="[MASK]",
        tokenize_chinese_chars=True,
        strip_accents=None,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            tokenize_chinese_chars=tokenize_chinese_chars,
            strip_accents=strip_accents,
            **kwargs,
        )

        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get("lowercase", do_lower_case) != do_lower_case
            or normalizer_state.get("strip_accents", strip_accents) != strip_accents
            or normalizer_state.get("handle_chinese_chars", tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop("type"))
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)

        self.do_lower_case = do_lower_case
    def batch_encode_candidates(self, text, **kwargs):
        """Encode a batch of (num_docs, num_candidates) texts into fixed-length model inputs."""
        # Always use a fixed-length padding so every candidate shares the same shape.
        kwargs["padding"] = PaddingStrategy.MAX_LENGTH
        batch_text = text
        batch_text_pair = kwargs.pop("text_pair", None)
        return_tensors = kwargs.pop("return_tensors", None)

        output_data = {
            "input_ids": [],
            "attention_mask": [],
            "token_type_ids": [],
        }

        for idx, candidate_text in enumerate(batch_text):
            if batch_text_pair is not None:
                candidate_text_pair = batch_text_pair[idx]
            else:
                candidate_text_pair = None

            encoded_candidates = super().__call__(candidate_text, candidate_text_pair, return_tensors=None, **kwargs)

            encoded_input_ids = encoded_candidates.get("input_ids")
            encoded_attention_mask = encoded_candidates.get("attention_mask")
            encoded_token_type_ids = encoded_candidates.get("token_type_ids")

            if encoded_input_ids is not None:
                output_data["input_ids"].append(encoded_input_ids)
            if encoded_attention_mask is not None:
                output_data["attention_mask"].append(encoded_attention_mask)
            if encoded_token_type_ids is not None:
                output_data["token_type_ids"].append(encoded_token_type_ids)

        output_data = {key: item for key, item in output_data.items() if len(item) != 0}

        return BatchEncoding(output_data, tensor_type=return_tensors)
    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]

        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]

        return output
    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]
    def save_vocabulary(self, save_directory, filename_prefix=None):
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
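A hedged sketch of the batch_encode_candidates API restored above (checkpoint from the URL map; the output shape convention (num_questions, num_candidates, max_length) follows the method body):

from transformers import RealmTokenizerFast

tok = RealmTokenizerFast.from_pretrained("google/realm-cc-news-pretrained-encoder")
text = [["Hello world!", "Nice to meet you!"], ["The cute cat.", "The adorable dog."]]
batch = tok.batch_encode_candidates(text, max_length=10, return_tensors="pt")
print(batch["input_ids"].shape)  # torch.Size([2, 2, 10])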
| 45 |
"""simple docstring"""
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
    import torch

    from transformers import (
        OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_LIST,
        OpenAIGPTConfig,
        OpenAIGPTDoubleHeadsModel,
        OpenAIGPTForSequenceClassification,
        OpenAIGPTLMHeadModel,
        OpenAIGPTModel,
    )
class OpenAIGPTModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
        self.pad_token_id = self.vocab_size - 1
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = OpenAIGPTConfig(
            vocab_size=self.vocab_size,
            n_embd=self.hidden_size,
            n_layer=self.num_hidden_layers,
            n_head=self.num_attention_heads,
            n_positions=self.max_position_embeddings,
            pad_token_id=self.pad_token_id,
        )
        head_mask = ids_tensor([self.num_hidden_layers, self.num_attention_heads], 2)

        return (
            config,
            input_ids,
            head_mask,
            token_type_ids,
            sequence_labels,
            token_labels,
            choice_labels,
        )
    def create_and_check_openai_gpt_model(self, config, input_ids, head_mask, token_type_ids, *args):
        model = OpenAIGPTModel(config=config)
        model.to(torch_device)
        model.eval()

        result = model(input_ids, token_type_ids=token_type_ids, head_mask=head_mask)
        result = model(input_ids, token_type_ids=token_type_ids)
        result = model(input_ids)

        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
    def create_and_check_lm_head_model(self, config, input_ids, head_mask, token_type_ids, *args):
        model = OpenAIGPTLMHeadModel(config)
        model.to(torch_device)
        model.eval()

        result = model(input_ids, token_type_ids=token_type_ids, labels=input_ids)
        self.parent.assertEqual(result.loss.shape, ())
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
    def create_and_check_double_lm_head_model(self, config, input_ids, head_mask, token_type_ids, *args):
        model = OpenAIGPTDoubleHeadsModel(config)
        model.to(torch_device)
        model.eval()

        result = model(input_ids, token_type_ids=token_type_ids, labels=input_ids)
        self.parent.assertEqual(result.loss.shape, ())
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
    def create_and_check_openai_gpt_for_sequence_classification(
        self, config, input_ids, head_mask, token_type_ids, *args
    ):
        config.num_labels = self.num_labels
        model = OpenAIGPTForSequenceClassification(config)
        model.to(torch_device)
        model.eval()

        sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
        result = model(input_ids, token_type_ids=token_type_ids, labels=sequence_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            head_mask,
            token_type_ids,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs

        inputs_dict = {
            "input_ids": input_ids,
            "token_type_ids": token_type_ids,
            "head_mask": head_mask,
        }

        return config, inputs_dict
@require_torch
class OpenAIGPTModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (OpenAIGPTModel, OpenAIGPTLMHeadModel, OpenAIGPTDoubleHeadsModel, OpenAIGPTForSequenceClassification)
        if is_torch_available()
        else ()
    )
    all_generative_model_classes = (
        (OpenAIGPTLMHeadModel,) if is_torch_available() else ()
    )  # TODO (PVP): Add Double HeadsModel when generate() function is changed accordingly
    pipeline_model_mapping = (
        {
            "feature-extraction": OpenAIGPTModel,
            "text-classification": OpenAIGPTForSequenceClassification,
            "text-generation": OpenAIGPTLMHeadModel,
            "zero-shot": OpenAIGPTForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    def is_pipeline_test_to_skip(
        self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name
    ):
        if pipeline_test_casse_name == "ZeroShotClassificationPipelineTests":
            # Get `tokenizer does not have a padding token` error for both fast/slow tokenizers.
            # `OpenAIGPTConfig` was never used in pipeline tests, either because of a missing checkpoint or because a
            # tiny config could not be created.
            return True

        return False
    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)

        if return_labels:
            if model_class.__name__ == "OpenAIGPTDoubleHeadsModel":
                inputs_dict["labels"] = torch.zeros(
                    (self.model_tester.batch_size, self.model_tester.num_choices, self.model_tester.seq_length),
                    dtype=torch.long,
                    device=torch_device,
                )
                inputs_dict["input_ids"] = inputs_dict["labels"]
                inputs_dict["token_type_ids"] = inputs_dict["labels"]
                inputs_dict["mc_token_ids"] = torch.zeros(
                    (self.model_tester.batch_size, self.model_tester.num_choices),
                    dtype=torch.long,
                    device=torch_device,
                )
                inputs_dict["mc_labels"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )
        return inputs_dict
    def setUp(self):
        self.model_tester = OpenAIGPTModelTester(self)
        self.config_tester = ConfigTester(self, config_class=OpenAIGPTConfig, n_embd=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_openai_gpt_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_openai_gpt_model(*config_and_inputs)

    def test_openai_gpt_lm_head_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_lm_head_model(*config_and_inputs)

    def test_openai_gpt_double_lm_head_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_double_lm_head_model(*config_and_inputs)

    def test_openai_gpt_classification_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_openai_gpt_for_sequence_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = OpenAIGPTModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
@require_torch
class OPENAIGPTModelLanguageGenerationTest(unittest.TestCase):
    @slow
    def test_lm_generate_openai_gpt(self):
        model = OpenAIGPTLMHeadModel.from_pretrained("openai-gpt")
        model.to(torch_device)
        input_ids = torch.tensor([[481, 4735, 544]], dtype=torch.long, device=torch_device)  # the president is
        expected_output_ids = [
            481,
            4735,
            544,
            246,
            963,
            870,
            762,
            239,
            244,
            40477,
            244,
            249,
            719,
            881,
            487,
            544,
            240,
            244,
            603,
            481,
        ]  # the president is a very good man. " \n " i'm sure he is, " said the

        output_ids = model.generate(input_ids, do_sample=False)
        self.assertListEqual(output_ids[0].tolist(), expected_output_ids)
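Outside the test harness, the slow integration check above corresponds to this hedged greedy-generation sketch (expected continuation text is taken from the comment in the test; requires downloading the checkpoint):

import torch
from transformers import OpenAIGPTLMHeadModel, OpenAIGPTTokenizer

tokenizer = OpenAIGPTTokenizer.from_pretrained("openai-gpt")
model = OpenAIGPTLMHeadModel.from_pretrained("openai-gpt")
input_ids = tokenizer("the president is", return_tensors="pt").input_ids
output_ids = model.generate(input_ids, do_sample=False)  # greedy decoding, matching the test
print(tokenizer.decode(output_ids[0]))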
| 264 | 0 |
"""simple docstring"""
import numpy as np
from transformers import Pipeline
def softmax(outputs):
    """Numerically stable softmax over the last axis."""
    maxes = np.max(outputs, axis=-1, keepdims=True)
    shifted_exp = np.exp(outputs - maxes)
    return shifted_exp / shifted_exp.sum(axis=-1, keepdims=True)
class PairClassificationPipeline(Pipeline):
    def _sanitize_parameters(self, **kwargs):
        preprocess_kwargs = {}
        if "second_text" in kwargs:
            preprocess_kwargs["second_text"] = kwargs["second_text"]
        return preprocess_kwargs, {}, {}

    def preprocess(self, text, second_text=None):
        return self.tokenizer(text, text_pair=second_text, return_tensors=self.framework)

    def _forward(self, model_inputs):
        return self.model(**model_inputs)

    def postprocess(self, model_outputs):
        logits = model_outputs.logits[0].numpy()
        probabilities = softmax(logits)

        best_class = np.argmax(probabilities)
        label = self.model.config.id2label[best_class]
        score = probabilities[best_class].item()
        logits = logits.tolist()
        return {"label": label, "score": score, "logits": logits}
| 46 |
"""simple docstring"""
from typing import Dict, List, Optional, Tuple, Union
import torch
from ...models import AutoencoderKL, Transformer2DModel
from ...schedulers import KarrasDiffusionSchedulers
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
class DiTPipeline(DiffusionPipeline):
    def __init__(
        self,
        transformer: Transformer2DModel,
        vae: AutoencoderKL,
        scheduler: KarrasDiffusionSchedulers,
        id2label: Optional[Dict[int, str]] = None,
    ):
        super().__init__()
        self.register_modules(transformer=transformer, vae=vae, scheduler=scheduler)

        # create an imagenet label -> id dictionary for easier use
        self.labels = {}
        if id2label is not None:
            for key, value in id2label.items():
                for label in value.split(","):
                    self.labels[label.lstrip()] = int(key)
            self.labels = dict(sorted(self.labels.items()))
    def get_label_ids(self, label: Union[str, List[str]]) -> List[int]:
        if not isinstance(label, list):
            label = list(label)

        for l in label:
            if l not in self.labels:
                raise ValueError(
                    f"{l} does not exist. Please make sure to select one of the following labels: \n {self.labels}."
                )

        return [self.labels[l] for l in label]
    @torch.no_grad()
    def __call__(
        self,
        class_labels: List[int],
        guidance_scale: float = 4.0,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        num_inference_steps: int = 50,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
    ):
        batch_size = len(class_labels)
        latent_size = self.transformer.config.sample_size
        latent_channels = self.transformer.config.in_channels

        latents = randn_tensor(
            shape=(batch_size, latent_channels, latent_size, latent_size),
            generator=generator,
            device=self.device,
            dtype=self.transformer.dtype,
        )
        latent_model_input = torch.cat([latents] * 2) if guidance_scale > 1 else latents

        class_labels = torch.tensor(class_labels, device=self.device).reshape(-1)
        class_null = torch.tensor([1000] * batch_size, device=self.device)
        class_labels_input = torch.cat([class_labels, class_null], 0) if guidance_scale > 1 else class_labels

        # set step values
        self.scheduler.set_timesteps(num_inference_steps)

        for t in self.progress_bar(self.scheduler.timesteps):
            if guidance_scale > 1:
                half = latent_model_input[: len(latent_model_input) // 2]
                latent_model_input = torch.cat([half, half], dim=0)
            latent_model_input = self.scheduler.scale_model_input(latent_model_input, t)

            timesteps = t
            if not torch.is_tensor(timesteps):
                # TODO: this requires sync between CPU and GPU. So try to pass timesteps as tensors if you can
                # This would be a good case for the `match` statement (Python 3.10+)
                is_mps = latent_model_input.device.type == "mps"
                if isinstance(timesteps, float):
                    dtype = torch.float32 if is_mps else torch.float64
                else:
                    dtype = torch.int32 if is_mps else torch.int64
                timesteps = torch.tensor([timesteps], dtype=dtype, device=latent_model_input.device)
            elif len(timesteps.shape) == 0:
                timesteps = timesteps[None].to(latent_model_input.device)
            # broadcast to batch dimension in a way that's compatible with ONNX/Core ML
            timesteps = timesteps.expand(latent_model_input.shape[0])
            # predict noise model_output
            noise_pred = self.transformer(
                latent_model_input, timestep=timesteps, class_labels=class_labels_input
            ).sample

            # perform guidance
            if guidance_scale > 1:
                eps, rest = noise_pred[:, :latent_channels], noise_pred[:, latent_channels:]
                cond_eps, uncond_eps = torch.split(eps, len(eps) // 2, dim=0)

                half_eps = uncond_eps + guidance_scale * (cond_eps - uncond_eps)
                eps = torch.cat([half_eps, half_eps], dim=0)

                noise_pred = torch.cat([eps, rest], dim=1)

            # learned sigma
            if self.transformer.config.out_channels // 2 == latent_channels:
                model_output, _ = torch.split(noise_pred, latent_channels, dim=1)
            else:
                model_output = noise_pred

            # compute previous image: x_t -> x_t-1
            latent_model_input = self.scheduler.step(model_output, t, latent_model_input).prev_sample

        if guidance_scale > 1:
            latents, _ = latent_model_input.chunk(2, dim=0)
        else:
            latents = latent_model_input

        latents = 1 / self.vae.config.scaling_factor * latents
        samples = self.vae.decode(latents).sample

        samples = (samples / 2 + 0.5).clamp(0, 1)

        # we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
        samples = samples.cpu().permute(0, 2, 3, 1).float().numpy()

        if output_type == "pil":
            samples = self.numpy_to_pil(samples)

        if not return_dict:
            return (samples,)

        return ImagePipelineOutput(images=samples)
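A hedged end-to-end sketch of driving the pipeline above (checkpoint and label names follow the public DiT example; a CUDA device is assumed):

import torch
from diffusers import DiTPipeline, DPMSolverMultistepScheduler

pipe = DiTPipeline.from_pretrained("facebook/DiT-XL-2-256", torch_dtype=torch.float16)
pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
pipe = pipe.to("cuda")

class_ids = pipe.get_label_ids(["white shark", "umbrella"])  # names must exist in pipe.labels
image = pipe(class_labels=class_ids, num_inference_steps=25).images[0]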
| 264 | 0 |
import argparse
import os
import sys
from unittest.mock import patch
import pytorch_lightning as pl
import timeout_decorator
import torch
from distillation import SummarizationDistiller, distill_main
from finetune import SummarizationModule, main
from transformers import MarianMTModel
from transformers.file_utils import cached_path
from transformers.testing_utils import TestCasePlus, require_torch_gpu, slow
from utils import load_json
lowerCamelCase : List[Any] = "sshleifer/mar_enro_6_3_student"
class TestMbartCc25Enro(TestCasePlus):
    def setUp(self):
        super().setUp()

        data_cached = cached_path(
            "https://cdn-datasets.huggingface.co/translation/wmt_en_ro-tr40k-va0.5k-te0.5k.tar.gz",
            extract_compressed_file=True,
        )
        self.data_dir = f"{data_cached}/wmt_en_ro-tr40k-va0.5k-te0.5k"
    @slow
    @require_torch_gpu
    def test_model_download(self):
        """Warm up the cache so the next test can be timed without download time, which varies between machines."""
        MarianMTModel.from_pretrained(MARIAN_MODEL)
    @slow
    @require_torch_gpu
    def test_train_mbart_cc25_enro_script(self):
        env_vars_to_replace = {
            "$MAX_LEN": 64,
            "$BS": 64,
            "$GAS": 1,
            "$ENRO_DIR": self.data_dir,
            "facebook/mbart-large-cc25": MARIAN_MODEL,
            # "val_check_interval=0.25": "val_check_interval=1.0",
            "--learning_rate=3e-5": "--learning_rate 3e-4",
            "--num_train_epochs 6": "--num_train_epochs 1",
        }

        # Clean up bash script
        bash_script = (self.test_file_dir / "train_mbart_cc25_enro.sh").open().read().split("finetune.py")[1].strip()
        bash_script = bash_script.replace("\\\n", "").strip().replace('"$@"', "")
        for k, v in env_vars_to_replace.items():
            bash_script = bash_script.replace(k, str(v))
        output_dir = self.get_auto_remove_tmp_dir()

        # bash_script = bash_script.replace("--fp16 ", "")
        args = f"""
            --output_dir {output_dir}
            --tokenizer_name Helsinki-NLP/opus-mt-en-ro
            --sortish_sampler
            --do_predict
            --gpus 1
            --freeze_encoder
            --n_train 40000
            --n_val 500
            --n_test 500
            --fp16_opt_level O1
            --num_sanity_val_steps 0
            --eval_beams 2
        """.split()
        # XXX: args.gpus > 1 : handle multi_gpu in the future
        testargs = ["finetune.py"] + bash_script.split() + args
        with patch.object(sys, "argv", testargs):
            parser = argparse.ArgumentParser()
            parser = pl.Trainer.add_argparse_args(parser)
            parser = SummarizationModule.add_model_specific_args(parser, os.getcwd())
            args = parser.parse_args()
            model = main(args)

        # Check metrics
        metrics = load_json(model.metrics_save_path)
        first_step_stats = metrics["val"][0]
        last_step_stats = metrics["val"][-1]
        self.assertEqual(len(metrics["val"]), (args.max_epochs / args.val_check_interval))
        assert isinstance(last_step_stats[f"val_avg_{model.val_metric}"], float)

        self.assertGreater(last_step_stats["val_avg_gen_time"], 0.01)
        # fails if the model hangs on generate; maybe a bad config was saved
        self.assertLessEqual(last_step_stats["val_avg_gen_time"], 1.0)

        # test learning requirements:

        # 1. BLEU improves over the course of training by more than 2 pts
        self.assertGreater(last_step_stats["val_avg_bleu"] - first_step_stats["val_avg_bleu"], 2)

        # 2. BLEU finishes above 17
        self.assertGreater(last_step_stats["val_avg_bleu"], 17)

        # 3. test BLEU and val BLEU within ~1.1 pt.
        self.assertLess(abs(metrics["val"][-1]["val_avg_bleu"] - metrics["test"][-1]["test_avg_bleu"]), 1.1)

        # check lightning ckpt can be loaded and has a reasonable statedict
        contents = os.listdir(output_dir)
        ckpt_path = [x for x in contents if x.endswith(".ckpt")][0]
        full_path = os.path.join(args.output_dir, ckpt_path)
        ckpt = torch.load(full_path, map_location="cpu")
        expected_key = "model.model.decoder.layers.0.encoder_attn_layer_norm.weight"
        assert expected_key in ckpt["state_dict"]
        assert ckpt["state_dict"]["model.model.decoder.layers.0.encoder_attn_layer_norm.weight"].dtype == torch.float32

        # TODO: turn on args.do_predict when PL bug fixed.
        if args.do_predict:
            contents = {os.path.basename(p) for p in contents}
            assert "test_generations.txt" in contents
            assert "test_results.txt" in contents
            # assert len(metrics["val"]) == desired_n_evals
            assert len(metrics["test"]) == 1
class TestDistilMarianNoTeacher(TestCasePlus):
    @timeout_decorator.timeout(600)
    @slow
    @require_torch_gpu
    def test_opus_mt_distill_script(self):
        data_dir = f"{self.test_file_dir_str}/test_data/wmt_en_ro"
        env_vars_to_replace = {
            "--fp16_opt_level=O1": "",
            "$MAX_LEN": 128,
            "$BS": 16,
            "$GAS": 1,
            "$ENRO_DIR": data_dir,
            "$m": "sshleifer/student_marian_en_ro_6_1",
            "val_check_interval=0.25": "val_check_interval=1.0",
        }

        # Clean up bash script
        bash_script = (
            (self.test_file_dir / "distil_marian_no_teacher.sh").open().read().split("distillation.py")[1].strip()
        )
        bash_script = bash_script.replace("\\\n", "").strip().replace('"$@"', "")
        bash_script = bash_script.replace("--fp16 ", " ")

        for k, v in env_vars_to_replace.items():
            bash_script = bash_script.replace(k, str(v))
        output_dir = self.get_auto_remove_tmp_dir()
        bash_script = bash_script.replace("--fp16", "")
        epochs = 6
        testargs = (
            ["distillation.py"]
            + bash_script.split()
            + [
                f"--output_dir={output_dir}",
                "--gpus=1",
                "--learning_rate=1e-3",
                f"--num_train_epochs={epochs}",
                "--warmup_steps=10",
                "--val_check_interval=1.0",
                "--do_predict",
            ]
        )
        with patch.object(sys, "argv", testargs):
            parser = argparse.ArgumentParser()
            parser = pl.Trainer.add_argparse_args(parser)
            parser = SummarizationDistiller.add_model_specific_args(parser, os.getcwd())
            args = parser.parse_args()
            # assert args.gpus == gpus THIS BREAKS for multi_gpu

            model = distill_main(args)

        # Check metrics
        metrics = load_json(model.metrics_save_path)
        first_step_stats = metrics["val"][0]
        last_step_stats = metrics["val"][-1]
        assert len(metrics["val"]) >= (args.max_epochs / args.val_check_interval)  # +1 accounts for val_sanity_check

        assert last_step_stats["val_avg_gen_time"] >= 0.01

        assert first_step_stats["val_avg_bleu"] < last_step_stats["val_avg_bleu"]  # fails if the model learned nothing
        assert 1.0 >= last_step_stats["val_avg_gen_time"]  # model hanging on generate. Maybe bad config was saved.
        assert isinstance(last_step_stats[f"val_avg_{model.val_metric}"], float)

        # check lightning ckpt can be loaded and has a reasonable statedict
        contents = os.listdir(output_dir)
        ckpt_path = [x for x in contents if x.endswith(".ckpt")][0]
        full_path = os.path.join(args.output_dir, ckpt_path)
        ckpt = torch.load(full_path, map_location="cpu")
        expected_key = "model.model.decoder.layers.0.encoder_attn_layer_norm.weight"
        assert expected_key in ckpt["state_dict"]
        assert ckpt["state_dict"]["model.model.decoder.layers.0.encoder_attn_layer_norm.weight"].dtype == torch.float32

        # TODO: turn on args.do_predict when PL bug fixed.
        if args.do_predict:
            contents = {os.path.basename(p) for p in contents}
            assert "test_generations.txt" in contents
            assert "test_results.txt" in contents
            # assert len(metrics["val"]) == desired_n_evals
            assert len(metrics["test"]) == 1
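Both tests drive an argparse-based training script by patching sys.argv; the trick is reusable on its own. A minimal, self-contained sketch (names are illustrative):

import argparse
import sys
from unittest.mock import patch

def cli_main():
    parser = argparse.ArgumentParser()
    parser.add_argument("--learning_rate", type=float, default=1e-4)
    return parser.parse_args()

with patch.object(sys, "argv", ["prog", "--learning_rate", "3e-4"]):
    args = cli_main()  # parses the patched argv, not the real command line
assert args.learning_rate == 3e-4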
| 47 |
"""simple docstring"""
import copy
import os
import cv2
import numpy as np
from matplotlib import pyplot as plt
class ConstantStretch:
    def __init__(self):
        self.img = ""
        self.original_image = ""
        self.last_list = []
        self.rem = 0
        self.L = 256
        self.sk = 0
        self.k = 0
        self.number_of_rows = 0
        self.number_of_cols = 0
    def stretch(self, input_image):
        self.img = cv2.imread(input_image, 0)
        self.original_image = copy.deepcopy(self.img)
        x, _, _ = plt.hist(self.img.ravel(), 256, [0, 256], label="x")
        self.k = np.sum(x)
        for i in range(len(x)):
            prk = x[i] / self.k
            self.sk += prk
            last = (self.L - 1) * self.sk
            if self.rem != 0:
                self.rem = int(last % last)
            last = int(last + 1 if self.rem >= 0.5 else last)
            self.last_list.append(last)
            self.number_of_rows = int(np.ma.count(self.img) / self.img[1].size)
            self.number_of_cols = self.img[1].size
        for i in range(self.number_of_cols):
            for j in range(self.number_of_rows):
                num = self.img[j][i]
                if num != self.last_list[num]:
                    self.img[j][i] = self.last_list[num]
        cv2.imwrite("output_data/output.jpg", self.img)
    def plot_histogram(self):
        plt.hist(self.img.ravel(), 256, [0, 256])

    def show_image(self):
        cv2.imshow("Output-Image", self.img)
        cv2.imshow("Input-Image", self.original_image)
        cv2.waitKey(5000)
        cv2.destroyAllWindows()
if __name__ == "__main__":
lowercase__ : Any = os.path.join(os.path.basename(__file__), '''image_data/input.jpg''')
lowercase__ : Any = ConstantStretch()
stretcher.stretch(file_path)
stretcher.plot_histogram()
stretcher.show_image()
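The class above is histogram equalization: self.sk accumulates the normalized histogram (a CDF), and (L - 1) * sk gives the new intensity. A compact numpy-only sketch of the same mapping, for reference:

import numpy as np

img = np.random.randint(0, 256, size=(4, 4), dtype=np.uint8)  # toy grayscale image
hist = np.bincount(img.ravel(), minlength=256)
cdf = np.cumsum(hist) / hist.sum()                 # running sum of the normalized histogram
mapping = np.round(255 * cdf).astype(np.uint8)     # (L - 1) * sk, rounded to an intensity
equalized = mapping[img]                           # apply the lookup table per pixel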
| 264 | 0 |
import sys
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from huggingface_hub import HfFolder, delete_repo
from requests.exceptions import HTTPError
from transformers import AutoFeatureExtractor, Wav2Vec2FeatureExtractor
from transformers.testing_utils import TOKEN, USER, get_tests_dir, is_staging_test
sys.path.append(str(Path(__file__).parent.parent / 'utils'))
from test_module.custom_feature_extraction import CustomFeatureExtractor # noqa E402
SAMPLE_FEATURE_EXTRACTION_FILE = get_tests_dir('fixtures')
class FeatureExtractorUtilTester(unittest.TestCase):
    def test_cached_files_are_used_when_internet_is_down(self):
        # A mock response for an HTTP head request to emulate server down
        response_mock = mock.Mock()
        response_mock.status_code = 500
        response_mock.headers = {}
        response_mock.raise_for_status.side_effect = HTTPError
        response_mock.json.return_value = {}

        # Download this model to make sure it's in the cache.
        feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained("hf-internal-testing/tiny-random-wav2vec2")

        # Under the mock environment we get a 500 error when trying to reach the model.
        with mock.patch("requests.Session.request", return_value=response_mock) as mock_head:
            feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained("hf-internal-testing/tiny-random-wav2vec2")
            # This check we did call the fake head request
            mock_head.assert_called()

    def test_legacy_load_from_url(self):
        # This test is for deprecated behavior and can be removed in v5
        feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained(
            "https://huggingface.co/hf-internal-testing/tiny-random-wav2vec2/resolve/main/preprocessor_config.json"
        )
@is_staging_test
class FeatureExtractorPushToHubTester(unittest.TestCase):
    @classmethod
    def setUpClass(cls):
        cls._token = TOKEN
        HfFolder.save_token(TOKEN)

    @classmethod
    def tearDownClass(cls):
        try:
            delete_repo(token=cls._token, repo_id="test-feature-extractor")
        except HTTPError:
            pass

        try:
            delete_repo(token=cls._token, repo_id="valid_org/test-feature-extractor-org")
        except HTTPError:
            pass

        try:
            delete_repo(token=cls._token, repo_id="test-dynamic-feature-extractor")
        except HTTPError:
            pass

    def test_push_to_hub(self):
        feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained(SAMPLE_FEATURE_EXTRACTION_FILE)
        feature_extractor.push_to_hub("test-feature-extractor", use_auth_token=self._token)

        new_feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained(f"{USER}/test-feature-extractor")
        for k, v in feature_extractor.__dict__.items():
            self.assertEqual(v, getattr(new_feature_extractor, k))

        # Reset repo
        delete_repo(token=self._token, repo_id="test-feature-extractor")

        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            feature_extractor.save_pretrained(
                tmp_dir, repo_id="test-feature-extractor", push_to_hub=True, use_auth_token=self._token
            )

        new_feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained(f"{USER}/test-feature-extractor")
        for k, v in feature_extractor.__dict__.items():
            self.assertEqual(v, getattr(new_feature_extractor, k))

    def test_push_to_hub_in_organization(self):
        feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained(SAMPLE_FEATURE_EXTRACTION_FILE)
        feature_extractor.push_to_hub("valid_org/test-feature-extractor", use_auth_token=self._token)

        new_feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained("valid_org/test-feature-extractor")
        for k, v in feature_extractor.__dict__.items():
            self.assertEqual(v, getattr(new_feature_extractor, k))

        # Reset repo
        delete_repo(token=self._token, repo_id="valid_org/test-feature-extractor")

        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            feature_extractor.save_pretrained(
                tmp_dir, repo_id="valid_org/test-feature-extractor-org", push_to_hub=True, use_auth_token=self._token
            )

        new_feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained("valid_org/test-feature-extractor-org")
        for k, v in feature_extractor.__dict__.items():
            self.assertEqual(v, getattr(new_feature_extractor, k))

    def test_push_to_hub_dynamic_feature_extractor(self):
        CustomFeatureExtractor.register_for_auto_class()
        feature_extractor = CustomFeatureExtractor.from_pretrained(SAMPLE_FEATURE_EXTRACTION_FILE)

        feature_extractor.push_to_hub("test-dynamic-feature-extractor", use_auth_token=self._token)

        # This has added the proper auto_map field to the config
        self.assertDictEqual(
            feature_extractor.auto_map,
            {"AutoFeatureExtractor": "custom_feature_extraction.CustomFeatureExtractor"},
        )

        new_feature_extractor = AutoFeatureExtractor.from_pretrained(
            f"{USER}/test-dynamic-feature-extractor", trust_remote_code=True
        )
        # Can't make an isinstance check because the new_feature_extractor is from the CustomFeatureExtractor class of a dynamic module
        self.assertEqual(new_feature_extractor.__class__.__name__, "CustomFeatureExtractor")
| 48 |
"""simple docstring"""
import shutil
import tempfile
import unittest
from unittest.mock import patch
from transformers import (
DefaultFlowCallback,
IntervalStrategy,
PrinterCallback,
ProgressCallback,
Trainer,
TrainerCallback,
TrainingArguments,
is_torch_available,
)
from transformers.testing_utils import require_torch
if is_torch_available():
    from transformers.trainer import DEFAULT_CALLBACKS

    from .test_trainer import RegressionDataset, RegressionModelConfig, RegressionPreTrainedModel
class MyTestTrainerCallback(TrainerCallback):
    """A callback that registers the events that go through."""

    def __init__(self):
        self.events = []

    def on_init_end(self, args, state, control, **kwargs):
        self.events.append("on_init_end")

    def on_train_begin(self, args, state, control, **kwargs):
        self.events.append("on_train_begin")

    def on_train_end(self, args, state, control, **kwargs):
        self.events.append("on_train_end")

    def on_epoch_begin(self, args, state, control, **kwargs):
        self.events.append("on_epoch_begin")

    def on_epoch_end(self, args, state, control, **kwargs):
        self.events.append("on_epoch_end")

    def on_step_begin(self, args, state, control, **kwargs):
        self.events.append("on_step_begin")

    def on_step_end(self, args, state, control, **kwargs):
        self.events.append("on_step_end")

    def on_evaluate(self, args, state, control, **kwargs):
        self.events.append("on_evaluate")

    def on_predict(self, args, state, control, **kwargs):
        self.events.append("on_predict")

    def on_save(self, args, state, control, **kwargs):
        self.events.append("on_save")

    def on_log(self, args, state, control, **kwargs):
        self.events.append("on_log")

    def on_prediction_step(self, args, state, control, **kwargs):
        self.events.append("on_prediction_step")
@require_torch
class TrainerCallbackTest(unittest.TestCase):
    def setUp(self):
        self.output_dir = tempfile.mkdtemp()

    def tearDown(self):
        shutil.rmtree(self.output_dir)

    def get_trainer(self, a=0, b=0, train_len=64, eval_len=64, callbacks=None, disable_tqdm=False, **kwargs):
        # disable_tqdm in TrainingArguments has a flaky default since it depends on the level of logging. We make sure
        # its set to False since the tests later on depend on its value.
        train_dataset = RegressionDataset(length=train_len)
        eval_dataset = RegressionDataset(length=eval_len)
        config = RegressionModelConfig(a=a, b=b)
        model = RegressionPreTrainedModel(config)

        args = TrainingArguments(self.output_dir, disable_tqdm=disable_tqdm, report_to=[], **kwargs)
        return Trainer(
            model,
            args,
            train_dataset=train_dataset,
            eval_dataset=eval_dataset,
            callbacks=callbacks,
        )

    def check_callbacks_equality(self, cbs1, cbs2):
        self.assertEqual(len(cbs1), len(cbs2))

        # Order doesn't matter
        cbs1 = sorted(cbs1, key=lambda cb: cb.__name__ if isinstance(cb, type) else cb.__class__.__name__)
        cbs2 = sorted(cbs2, key=lambda cb: cb.__name__ if isinstance(cb, type) else cb.__class__.__name__)

        for cb1, cb2 in zip(cbs1, cbs2):
            if isinstance(cb1, type) and isinstance(cb2, type):
                self.assertEqual(cb1, cb2)
            elif isinstance(cb1, type) and not isinstance(cb2, type):
                self.assertEqual(cb1, cb2.__class__)
            elif not isinstance(cb1, type) and isinstance(cb2, type):
                self.assertEqual(cb1.__class__, cb2)
            else:
                self.assertEqual(cb1, cb2)
    def get_expected_events(self, trainer):
        expected_events = ["on_init_end", "on_train_begin"]
        step = 0
        train_dl_len = len(trainer.get_eval_dataloader())
        evaluation_events = ["on_prediction_step"] * len(trainer.get_eval_dataloader()) + ["on_log", "on_evaluate"]
        for _ in range(trainer.state.num_train_epochs):
            expected_events.append("on_epoch_begin")
            for _ in range(train_dl_len):
                step += 1
                expected_events += ["on_step_begin", "on_step_end"]
                if step % trainer.args.logging_steps == 0:
                    expected_events.append("on_log")
                if trainer.args.evaluation_strategy == IntervalStrategy.STEPS and step % trainer.args.eval_steps == 0:
                    expected_events += evaluation_events.copy()
                if step % trainer.args.save_steps == 0:
                    expected_events.append("on_save")
            expected_events.append("on_epoch_end")
            if trainer.args.evaluation_strategy == IntervalStrategy.EPOCH:
                expected_events += evaluation_events.copy()
        expected_events += ["on_log", "on_train_end"]
        return expected_events
    def test_init_callback(self):
        trainer = self.get_trainer()
        expected_callbacks = DEFAULT_CALLBACKS.copy() + [ProgressCallback]
        self.check_callbacks_equality(trainer.callback_handler.callbacks, expected_callbacks)

        # Callbacks passed at init are added to the default callbacks
        trainer = self.get_trainer(callbacks=[MyTestTrainerCallback])
        expected_callbacks.append(MyTestTrainerCallback)
        self.check_callbacks_equality(trainer.callback_handler.callbacks, expected_callbacks)

        # TrainingArguments.disable_tqdm controls if use ProgressCallback or PrinterCallback
        trainer = self.get_trainer(disable_tqdm=True)
        expected_callbacks = DEFAULT_CALLBACKS.copy() + [PrinterCallback]
        self.check_callbacks_equality(trainer.callback_handler.callbacks, expected_callbacks)
    def test_add_remove_callback(self):
        expected_callbacks = DEFAULT_CALLBACKS.copy() + [ProgressCallback]
        trainer = self.get_trainer()

        # We can add, pop, or remove by class name
        trainer.remove_callback(DefaultFlowCallback)
        expected_callbacks.remove(DefaultFlowCallback)
        self.check_callbacks_equality(trainer.callback_handler.callbacks, expected_callbacks)

        trainer = self.get_trainer()
        cb = trainer.pop_callback(DefaultFlowCallback)
        self.assertEqual(cb.__class__, DefaultFlowCallback)
        self.check_callbacks_equality(trainer.callback_handler.callbacks, expected_callbacks)

        trainer.add_callback(DefaultFlowCallback)
        expected_callbacks.insert(0, DefaultFlowCallback)
        self.check_callbacks_equality(trainer.callback_handler.callbacks, expected_callbacks)

        # We can also add, pop, or remove by instance
        trainer = self.get_trainer()
        cb = trainer.callback_handler.callbacks[0]
        trainer.remove_callback(cb)
        expected_callbacks.remove(DefaultFlowCallback)
        self.check_callbacks_equality(trainer.callback_handler.callbacks, expected_callbacks)

        trainer = self.get_trainer()
        cb1 = trainer.callback_handler.callbacks[0]
        cb2 = trainer.pop_callback(cb1)
        self.assertEqual(cb1, cb2)
        self.check_callbacks_equality(trainer.callback_handler.callbacks, expected_callbacks)

        trainer.add_callback(cb1)
        expected_callbacks.insert(0, DefaultFlowCallback)
        self.check_callbacks_equality(trainer.callback_handler.callbacks, expected_callbacks)
    def test_event_flow(self):
        import warnings

        # XXX: for now ignore scatter_gather warnings in this test since it's not relevant to what's being tested
        warnings.simplefilter(action="ignore", category=UserWarning)

        trainer = self.get_trainer(callbacks=[MyTestTrainerCallback])
        trainer.train()
        events = trainer.callback_handler.callbacks[-2].events
        self.assertEqual(events, self.get_expected_events(trainer))

        # Independent log/save/eval
        trainer = self.get_trainer(callbacks=[MyTestTrainerCallback], logging_steps=5)
        trainer.train()
        events = trainer.callback_handler.callbacks[-2].events
        self.assertEqual(events, self.get_expected_events(trainer))

        trainer = self.get_trainer(callbacks=[MyTestTrainerCallback], save_steps=5)
        trainer.train()
        events = trainer.callback_handler.callbacks[-2].events
        self.assertEqual(events, self.get_expected_events(trainer))

        trainer = self.get_trainer(callbacks=[MyTestTrainerCallback], eval_steps=5, evaluation_strategy="steps")
        trainer.train()
        events = trainer.callback_handler.callbacks[-2].events
        self.assertEqual(events, self.get_expected_events(trainer))

        trainer = self.get_trainer(callbacks=[MyTestTrainerCallback], evaluation_strategy="epoch")
        trainer.train()
        events = trainer.callback_handler.callbacks[-2].events
        self.assertEqual(events, self.get_expected_events(trainer))

        # A bit of everything
        trainer = self.get_trainer(
            callbacks=[MyTestTrainerCallback],
            logging_steps=3,
            save_steps=10,
            eval_steps=5,
            evaluation_strategy="steps",
        )
        trainer.train()
        events = trainer.callback_handler.callbacks[-2].events
        self.assertEqual(events, self.get_expected_events(trainer))

        # warning should be emitted for duplicated callbacks
        with patch("transformers.trainer_callback.logger.warning") as warn_mock:
            trainer = self.get_trainer(
                callbacks=[MyTestTrainerCallback, MyTestTrainerCallback],
            )
            assert str(MyTestTrainerCallback) in warn_mock.call_args[0][0]
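A hedged sketch of wiring an event-recording callback like the one above into a real Trainer (model and dataset are placeholders you would supply):

from transformers import Trainer, TrainingArguments

cb = MyTestTrainerCallback()
trainer = Trainer(
    model=model,                  # placeholder: any PreTrainedModel
    args=TrainingArguments("out", report_to=[]),
    train_dataset=train_dataset,  # placeholder: any torch Dataset
    callbacks=[cb],
)
trainer.train()
print(cb.events[:3])  # e.g. ['on_init_end', 'on_train_begin', 'on_epoch_begin']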
| 264 | 0 |
from collections import defaultdict
def check_anagrams(first_str: str, second_str: str) -> bool:
    """Return True when the two strings are anagrams of each other."""
    first_str = first_str.lower().strip()
    second_str = second_str.lower().strip()

    # Remove whitespace
    first_str = first_str.replace(" ", "")
    second_str = second_str.replace(" ", "")

    # Strings of different lengths are not anagrams
    if len(first_str) != len(second_str):
        return False

    # Default values for count should be 0
    count = defaultdict(int)

    # For each character in the input strings, increment the count for the
    # first string's character and decrement it for the second string's
    for i in range(len(first_str)):
        count[first_str[i]] += 1
        count[second_str[i]] -= 1

    return all(_count == 0 for _count in count.values())
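For the doctest.testmod() call in the __main__ block below to have anything to verify, the function's docstring would carry examples along these lines (reconstructed; each result follows directly from the definition):

>>> check_anagrams("Silent", "Listen")
True
>>> check_anagrams("This is a string", "Is this a string")
True
>>> check_anagrams("There", "Their")
False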
if __name__ == "__main__":
from doctest import testmod
testmod()
__snake_case :Any = input('''Enter the first string ''').strip()
__snake_case :int = input('''Enter the second string ''').strip()
__snake_case :int = check_anagrams(input_a, input_b)
print(f'{input_a} and {input_b} are {"" if status else "not "}anagrams.')
| 49 |
"""simple docstring"""
import numpy as np
def tangent_hyperbolic(vector: np.ndarray) -> np.ndarray:
    """Compute tanh elementwise via its exponential form: tanh(x) = 2 / (1 + e^(-2x)) - 1."""
    return (2 / (1 + np.exp(-2 * vector))) - 1
if __name__ == "__main__":
import doctest
doctest.testmod()
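A quick numeric sanity check of the exponential identity used above:

import numpy as np

v = np.array([-1.0, 0.0, 1.0])
out = (2 / (1 + np.exp(-2 * v))) - 1
print(np.allclose(out, np.tanh(v)))  # True: the identity matches np.tanh elementwise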
| 264 | 0 |
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_convbert import ConvBertTokenizer
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt"}

PRETRAINED_VOCAB_FILES_MAP = {
"""vocab_file""": {
"""YituTech/conv-bert-base""": """https://huggingface.co/YituTech/conv-bert-base/resolve/main/vocab.txt""",
"""YituTech/conv-bert-medium-small""": (
"""https://huggingface.co/YituTech/conv-bert-medium-small/resolve/main/vocab.txt"""
),
"""YituTech/conv-bert-small""": """https://huggingface.co/YituTech/conv-bert-small/resolve/main/vocab.txt""",
}
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "YituTech/conv-bert-base": 512,
    "YituTech/conv-bert-medium-small": 512,
    "YituTech/conv-bert-small": 512,
}
PRETRAINED_INIT_CONFIGURATION = {
"""YituTech/conv-bert-base""": {"""do_lower_case""": True},
"""YituTech/conv-bert-medium-small""": {"""do_lower_case""": True},
"""YituTech/conv-bert-small""": {"""do_lower_case""": True},
}
class ConvBertTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = ConvBertTokenizer

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=True,
        unk_token="[UNK]",
        sep_token="[SEP]",
        pad_token="[PAD]",
        cls_token="[CLS]",
        mask_token="[MASK]",
        tokenize_chinese_chars=True,
        strip_accents=None,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            tokenize_chinese_chars=tokenize_chinese_chars,
            strip_accents=strip_accents,
            **kwargs,
        )

        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get("lowercase", do_lower_case) != do_lower_case
            or normalizer_state.get("strip_accents", strip_accents) != strip_accents
            or normalizer_state.get("handle_chinese_chars", tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop("type"))
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)

        self.do_lower_case = do_lower_case
    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]

        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]

        return output
    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
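A hedged illustration of the 0/1 segment-id pattern produced by create_token_type_ids_from_sequences above (token ids are made up; 101/102 stand in for [CLS]/[SEP]):

token_ids_0, token_ids_1 = [11, 12], [21, 22, 23]
cls, sep = [101], [102]
segment_ids = len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]
print(segment_ids)  # [0, 0, 0, 0, 1, 1, 1, 1]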
| 50 |
"""simple docstring"""
import numpy as np
import torch
from torch.utils.data import Dataset
from utils import logger
class LmSeqsDataset(Dataset):
    """Wraps tokenized sequences for distillation; filters and splits them on construction."""

    def __init__(self, params, data):
        self.params = params

        self.token_ids = np.array(data)
        self.lengths = np.array([len(t) for t in data])

        self.check()
        self.remove_long_sequences()
        self.remove_empty_sequences()
        self.remove_unknown_sequences()
        self.check()
        self.print_statistics()
    def __getitem__(self, index):
        return (self.token_ids[index], self.lengths[index])

    def __len__(self):
        return len(self.lengths)
    def check(self):
        """Some sanity checks."""
        assert len(self.token_ids) == len(self.lengths)
        assert all(self.lengths[i] == len(self.token_ids[i]) for i in range(len(self.lengths)))
    def remove_long_sequences(self):
        """Sequences that are too long are split into several sequences."""
        max_len = self.params.max_model_input_size
        indices = self.lengths > max_len
        logger.info(f"Splitting {sum(indices)} too long sequences.")

        def divide_chunks(l, n):
            return [l[i : i + n] for i in range(0, len(l), n)]

        new_tok_ids = []
        new_lengths = []
        if self.params.mlm:
            cls_id, sep_id = self.params.special_tok_ids["cls_token"], self.params.special_tok_ids["sep_token"]
        else:
            cls_id, sep_id = self.params.special_tok_ids["bos_token"], self.params.special_tok_ids["eos_token"]

        for seq_, len_ in zip(self.token_ids, self.lengths):
            assert (seq_[0] == cls_id) and (seq_[-1] == sep_id), seq_
            if len_ <= max_len:
                new_tok_ids.append(seq_)
                new_lengths.append(len_)
            else:
                sub_seqs = []
                for sub_s in divide_chunks(seq_, max_len - 2):
                    if sub_s[0] != cls_id:
                        sub_s = np.insert(sub_s, 0, cls_id)
                    if sub_s[-1] != sep_id:
                        sub_s = np.insert(sub_s, len(sub_s), sep_id)
                    assert len(sub_s) <= max_len
                    assert (sub_s[0] == cls_id) and (sub_s[-1] == sep_id), sub_s
                    sub_seqs.append(sub_s)

                new_tok_ids.extend(sub_seqs)
                new_lengths.extend([len(l) for l in sub_seqs])

        self.token_ids = np.array(new_tok_ids)
        self.lengths = np.array(new_lengths)
    def remove_empty_sequences(self):
        """Too short sequences are simply removed. This could be tuned."""
        init_size = len(self)
        indices = self.lengths > 11
        self.token_ids = self.token_ids[indices]
        self.lengths = self.lengths[indices]
        new_size = len(self)
        logger.info(f"Remove {init_size - new_size} too short (<=11 tokens) sequences.")
def _snake_case ( self : Tuple ):
if "unk_token" not in self.params.special_tok_ids:
return
else:
snake_case_ : str = self.params.special_tok_ids['''unk_token''']
snake_case_ : str = len(self )
snake_case_ : int = np.array([np.count_nonzero(a == unk_token_id ) for a in self.token_ids] )
snake_case_ : str = (unk_occs / self.lengths) < 0.5
snake_case_ : Optional[Any] = self.token_ids[indices]
snake_case_ : Optional[int] = self.lengths[indices]
snake_case_ : Dict = len(self )
logger.info(f"Remove {init_size - new_size} sequences with a high level of unknown tokens (50%)." )
def _snake_case ( self : Dict ):
if not self.params.is_master:
return
logger.info(f"{len(self )} sequences" )
# data_len = sum(self.lengths)
# nb_unique_tokens = len(Counter(list(chain(*self.token_ids))))
# logger.info(f'{data_len} tokens ({nb_unique_tokens} unique)')
# unk_idx = self.params.special_tok_ids['unk_token']
# nb_unknown = sum([(t==unk_idx).sum() for t in self.token_ids])
# logger.info(f'{nb_unknown} unknown tokens (covering {100*nb_unknown/data_len:.2f}% of the data)')
def _snake_case ( self : List[str] , lowercase_ : Dict ):
snake_case_ : Optional[int] = [t[0] for t in batch]
snake_case_ : str = [t[1] for t in batch]
assert len(lowercase_ ) == len(lowercase_ )
# Max sequence length in the batch, used for padding
snake_case_ : str = max(lowercase_ )
# Pad token ids
if self.params.mlm:
snake_case_ : Tuple = self.params.special_tok_ids['''pad_token''']
else:
snake_case_ : Dict = self.params.special_tok_ids['''unk_token''']
snake_case_ : Any = [list(t.astype(lowercase_ ) ) + [pad_idx] * (max_seq_len_ - len(lowercase_ )) for t in token_ids]
assert len(tk_ ) == len(lowercase_ )
assert all(len(lowercase_ ) == max_seq_len_ for t in tk_ )
snake_case_ : str = torch.tensor(tk_ ) # (bs, max_seq_len_)
snake_case_ : Optional[int] = torch.tensor(lowercase_ ) # (bs)
return tk_t, lg_t
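# --- Illustrative sketch (added; the token ids are invented). The final method above is a
# collate function that right-pads every sequence in a batch to the batch maximum and
# returns the padded ids together with the original lengths.
import torch

def _demo_collate(batch, pad_idx=0):
    lengths = [len(t) for t in batch]
    max_len = max(lengths)
    padded = [t + [pad_idx] * (max_len - len(t)) for t in batch]
    return torch.tensor(padded), torch.tensor(lengths)

_tk, _lg = _demo_collate([[5, 6, 7], [8, 9]])
assert _tk.tolist() == [[5, 6, 7], [8, 9, 0]] and _lg.tolist() == [3, 2]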
| 264 | 0 |
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DPMSolverMultistepScheduler,
TextToVideoSDPipeline,
UNetaDConditionModel,
)
from diffusers.utils import is_xformers_available, load_numpy, skip_mps, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
@skip_mps
class __snake_case ( a , unittest.TestCase ):
UpperCAmelCase__ : int = TextToVideoSDPipeline
UpperCAmelCase__ : List[Any] = TEXT_TO_IMAGE_PARAMS
UpperCAmelCase__ : int = TEXT_TO_IMAGE_BATCH_PARAMS
# No `output_type`.
UpperCAmelCase__ : Tuple = frozenset(
[
'''num_inference_steps''',
'''generator''',
'''latents''',
'''return_dict''',
'''callback''',
'''callback_steps''',
] )
def lowerCamelCase ( self : str):
"""simple docstring"""
torch.manual_seed(0)
UpperCAmelCase_ = UNetaDConditionModel(
block_out_channels=(32, 64, 64, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('''CrossAttnDownBlock3D''', '''CrossAttnDownBlock3D''', '''CrossAttnDownBlock3D''', '''DownBlock3D''') , up_block_types=('''UpBlock3D''', '''CrossAttnUpBlock3D''', '''CrossAttnUpBlock3D''', '''CrossAttnUpBlock3D''') , cross_attention_dim=32 , attention_head_dim=4 , )
UpperCAmelCase_ = DDIMScheduler(
beta_start=0.0_0_0_8_5 , beta_end=0.0_1_2 , beta_schedule='''scaled_linear''' , clip_sample=_snake_case , set_alpha_to_one=_snake_case , )
torch.manual_seed(0)
UpperCAmelCase_ = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['''DownEncoderBlock2D''', '''DownEncoderBlock2D'''] , up_block_types=['''UpDecoderBlock2D''', '''UpDecoderBlock2D'''] , latent_channels=4 , sample_size=128 , )
torch.manual_seed(0)
UpperCAmelCase_ = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , hidden_act='''gelu''' , projection_dim=512 , )
UpperCAmelCase_ = CLIPTextModel(_snake_case)
UpperCAmelCase_ = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''')
UpperCAmelCase_ = {
'''unet''': unet,
'''scheduler''': scheduler,
'''vae''': vae,
'''text_encoder''': text_encoder,
'''tokenizer''': tokenizer,
}
return components
def lowerCamelCase ( self : Union[str, Any] , _snake_case : Any , _snake_case : List[str]=0):
"""simple docstring"""
if str(_snake_case).startswith('''mps'''):
UpperCAmelCase_ = torch.manual_seed(_snake_case)
else:
UpperCAmelCase_ = torch.Generator(device=_snake_case).manual_seed(_snake_case)
UpperCAmelCase_ = {
'''prompt''': '''A painting of a squirrel eating a burger''',
'''generator''': generator,
'''num_inference_steps''': 2,
'''guidance_scale''': 6.0,
'''output_type''': '''pt''',
}
return inputs
def lowerCamelCase ( self : Tuple):
"""simple docstring"""
UpperCAmelCase_ = '''cpu''' # ensure determinism for the device-dependent torch.Generator
UpperCAmelCase_ = self.get_dummy_components()
UpperCAmelCase_ = TextToVideoSDPipeline(**_snake_case)
UpperCAmelCase_ = sd_pipe.to(_snake_case)
sd_pipe.set_progress_bar_config(disable=_snake_case)
UpperCAmelCase_ = self.get_dummy_inputs(_snake_case)
UpperCAmelCase_ = '''np'''
UpperCAmelCase_ = sd_pipe(**_snake_case).frames
UpperCAmelCase_ = frames[0][-3:, -3:, -1]
assert frames[0].shape == (64, 64, 3)
UpperCAmelCase_ = np.array([1_5_8.0, 1_6_0.0, 1_5_3.0, 1_2_5.0, 1_0_0.0, 1_2_1.0, 1_1_1.0, 9_3.0, 1_1_3.0])
assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
def lowerCamelCase ( self : Any):
"""simple docstring"""
self._test_attention_slicing_forward_pass(test_mean_pixel_difference=_snake_case , expected_max_diff=3e-3)
@unittest.skipIf(
torch_device != '''cuda''' or not is_xformers_available() , reason='''XFormers attention is only available with CUDA and `xformers` installed''' , )
def lowerCamelCase ( self : List[Any]):
"""simple docstring"""
self._test_xformers_attention_forwardGenerator_pass(test_mean_pixel_difference=_snake_case , expected_max_diff=1e-2)
@unittest.skip(reason='''Batching needs to be properly figured out first for this pipeline.''')
def lowerCamelCase ( self : List[str]):
"""simple docstring"""
pass
@unittest.skip(reason='''Batching needs to be properly figured out first for this pipeline.''')
def lowerCamelCase ( self : List[Any]):
"""simple docstring"""
pass
@unittest.skip(reason='''`num_images_per_prompt` argument is not supported for this pipeline.''')
def lowerCamelCase ( self : Dict):
"""simple docstring"""
pass
def lowerCamelCase ( self : Tuple):
"""simple docstring"""
return super().test_progress_bar()
@slow
@skip_mps
class __snake_case ( unittest.TestCase ):
def lowerCamelCase ( self : Optional[Any]):
"""simple docstring"""
UpperCAmelCase_ = load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/text_to_video/video.npy''')
UpperCAmelCase_ = TextToVideoSDPipeline.from_pretrained('''damo-vilab/text-to-video-ms-1.7b''')
UpperCAmelCase_ = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
UpperCAmelCase_ = pipe.to('''cuda''')
UpperCAmelCase_ = '''Spiderman is surfing'''
UpperCAmelCase_ = torch.Generator(device='''cpu''').manual_seed(0)
UpperCAmelCase_ = pipe(_snake_case , generator=_snake_case , num_inference_steps=25 , output_type='''pt''').frames
UpperCAmelCase_ = video_frames.cpu().numpy()
assert np.abs(expected_video - video).mean() < 5e-2
def lowerCamelCase ( self : List[str]):
"""simple docstring"""
UpperCAmelCase_ = load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/text_to_video/video_2step.npy''')
UpperCAmelCase_ = TextToVideoSDPipeline.from_pretrained('''damo-vilab/text-to-video-ms-1.7b''')
UpperCAmelCase_ = pipe.to('''cuda''')
UpperCAmelCase_ = '''Spiderman is surfing'''
UpperCAmelCase_ = torch.Generator(device='''cpu''').manual_seed(0)
UpperCAmelCase_ = pipe(_snake_case , generator=_snake_case , num_inference_steps=2 , output_type='''pt''').frames
UpperCAmelCase_ = video_frames.cpu().numpy()
assert np.abs(expected_video - video).mean() < 5e-2
| 51 |
"""simple docstring"""
from sympy import diff, lambdify, symbols
from sympy.functions import * # noqa: F403
def __lowercase ( _a , _a , _a = "x" , _a = 10**-10 , _a = 1 , ):
snake_case_ : Any = symbols(_a )
snake_case_ : int = lambdify(_a , _a )
snake_case_ : Optional[Any] = lambdify(_a , diff(_a , _a ) )
snake_case_ : Optional[Any] = starting_point
while True:
if diff_function(_a ) != 0:
snake_case_ : Optional[int] = prev_guess - multiplicity * func(_a ) / diff_function(
_a )
else:
raise ZeroDivisionError('''Could not find root''' ) from None
# Precision is checked by comparing the difference of consecutive guesses
if abs(next_guess - prev_guess ) < precision:
return next_guess
snake_case_ : int = next_guess
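# Note (added for clarity): the loop above is the modified Newton-Raphson step for a root of
# multiplicity m, x_{n+1} = x_n - m * f(x_n) / f'(x_n), iterated until two consecutive
# guesses differ by less than `precision`.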
# Let's Execute
if __name__ == "__main__":
# Find root of trigonometric function
# Find value of pi
print(f'The root of sin(x) = 0 is {newton_raphson("sin(x)", 2)}')
# Find root of polynomial
# Find fourth Root of 5
print(f'The root of x**4 - 5 = 0 is {newton_raphson("x**4 -5", 0.4 +5j)}')
# Find value of e
print(
'''The root of log(y) - 1 = 0 is ''',
f'{newton_raphson("log(y) - 1", 2, variable="y")}',
)
# Exponential Roots
print(
'''The root of exp(x) - 1 = 0 is''',
f'{newton_raphson("exp(x) - 1", 10, precision=0.005)}',
)
# Find root of cos(x)
print(f'The root of cos(x) = 0 is {newton_raphson("cos(x)", 0)}')
| 264 | 0 |
import argparse
import json
from pathlib import Path
import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import DeiTConfig, DeiTForImageClassificationWithTeacher, DeiTImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
__lowerCamelCase : List[str] = logging.get_logger(__name__)
def A_ ( _lowerCAmelCase , _lowerCAmelCase=False ) -> Any:
UpperCamelCase : Dict = []
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((F"""blocks.{i}.norm1.weight""", F"""deit.encoder.layer.{i}.layernorm_before.weight""") )
rename_keys.append((F"""blocks.{i}.norm1.bias""", F"""deit.encoder.layer.{i}.layernorm_before.bias""") )
rename_keys.append((F"""blocks.{i}.attn.proj.weight""", F"""deit.encoder.layer.{i}.attention.output.dense.weight""") )
rename_keys.append((F"""blocks.{i}.attn.proj.bias""", F"""deit.encoder.layer.{i}.attention.output.dense.bias""") )
rename_keys.append((F"""blocks.{i}.norm2.weight""", F"""deit.encoder.layer.{i}.layernorm_after.weight""") )
rename_keys.append((F"""blocks.{i}.norm2.bias""", F"""deit.encoder.layer.{i}.layernorm_after.bias""") )
rename_keys.append((F"""blocks.{i}.mlp.fc1.weight""", F"""deit.encoder.layer.{i}.intermediate.dense.weight""") )
rename_keys.append((F"""blocks.{i}.mlp.fc1.bias""", F"""deit.encoder.layer.{i}.intermediate.dense.bias""") )
rename_keys.append((F"""blocks.{i}.mlp.fc2.weight""", F"""deit.encoder.layer.{i}.output.dense.weight""") )
rename_keys.append((F"""blocks.{i}.mlp.fc2.bias""", F"""deit.encoder.layer.{i}.output.dense.bias""") )
# projection layer + position embeddings
rename_keys.extend(
[
("cls_token", "deit.embeddings.cls_token"),
("dist_token", "deit.embeddings.distillation_token"),
("patch_embed.proj.weight", "deit.embeddings.patch_embeddings.projection.weight"),
("patch_embed.proj.bias", "deit.embeddings.patch_embeddings.projection.bias"),
("pos_embed", "deit.embeddings.position_embeddings"),
] )
if base_model:
# layernorm + pooler
rename_keys.extend(
[
("norm.weight", "layernorm.weight"),
("norm.bias", "layernorm.bias"),
("pre_logits.fc.weight", "pooler.dense.weight"),
("pre_logits.fc.bias", "pooler.dense.bias"),
] )
# if just the base model, we should remove "deit" from all keys that start with "deit"
UpperCamelCase : Optional[int] = [(pair[0], pair[1][4:]) if pair[1].startswith("deit" ) else pair for pair in rename_keys]
else:
# layernorm + classification heads
rename_keys.extend(
[
("norm.weight", "deit.layernorm.weight"),
("norm.bias", "deit.layernorm.bias"),
("head.weight", "cls_classifier.weight"),
("head.bias", "cls_classifier.bias"),
("head_dist.weight", "distillation_classifier.weight"),
("head_dist.bias", "distillation_classifier.bias"),
] )
return rename_keys
def A_ ( _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase=False ) -> Union[str, Any]:
for i in range(config.num_hidden_layers ):
if base_model:
UpperCamelCase : Tuple = ""
else:
UpperCamelCase : Dict = "deit."
# read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
UpperCamelCase : List[str] = state_dict.pop(F"""blocks.{i}.attn.qkv.weight""" )
UpperCamelCase : Dict = state_dict.pop(F"""blocks.{i}.attn.qkv.bias""" )
# next, add query, keys and values (in that order) to the state dict
UpperCamelCase : Dict = in_proj_weight[
: config.hidden_size, :
]
UpperCamelCase : str = in_proj_bias[: config.hidden_size]
UpperCamelCase : Optional[Any] = in_proj_weight[
config.hidden_size : config.hidden_size * 2, :
]
UpperCamelCase : Optional[int] = in_proj_bias[
config.hidden_size : config.hidden_size * 2
]
UpperCamelCase : Dict = in_proj_weight[
-config.hidden_size :, :
]
UpperCamelCase : Optional[int] = in_proj_bias[-config.hidden_size :]
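# --- Illustrative check (added; hidden size 4 is a toy assumption). timm stores the attention
# projections as one fused (3 * hidden, hidden) matrix; the slicing above is equivalent to
# taking the query, key and value blocks in order:
import numpy as np

_h = 4
_fused = np.arange(3 * _h * _h).reshape(3 * _h, _h)
_q, _k, _v = _fused[:_h, :], _fused[_h : 2 * _h, :], _fused[-_h:, :]
assert _q.shape == _k.shape == _v.shape == (_h, _h)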
def A_ ( _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) -> int:
UpperCamelCase : Optional[Any] = dct.pop(_lowerCAmelCase )
UpperCamelCase : Optional[Any] = val
def A_ ( ) -> Optional[Any]:
UpperCamelCase : Optional[int] = "http://images.cocodataset.org/val2017/000000039769.jpg"
UpperCamelCase : List[Any] = Image.open(requests.get(_lowerCAmelCase , stream=_lowerCAmelCase ).raw )
return im
@torch.no_grad()
def A_ ( _lowerCAmelCase , _lowerCAmelCase ) -> List[str]:
UpperCamelCase : str = DeiTConfig()
# all deit models have fine-tuned heads
UpperCamelCase : Tuple = False
# dataset (fine-tuned on ImageNet 2012), patch_size and image_size
UpperCamelCase : List[str] = 1000
UpperCamelCase : int = "huggingface/label-files"
UpperCamelCase : Optional[int] = "imagenet-1k-id2label.json"
UpperCamelCase : Union[str, Any] = json.load(open(hf_hub_download(_lowerCAmelCase , _lowerCAmelCase , repo_type="dataset" ) , "r" ) )
UpperCamelCase : List[str] = {int(_lowerCAmelCase ): v for k, v in idalabel.items()}
UpperCamelCase : Tuple = idalabel
UpperCamelCase : List[Any] = {v: k for k, v in idalabel.items()}
UpperCamelCase : int = int(deit_name[-6:-4] )
UpperCamelCase : str = int(deit_name[-3:] )
# size of the architecture
if deit_name[9:].startswith("tiny" ):
UpperCamelCase : Dict = 192
UpperCamelCase : Optional[int] = 768
UpperCamelCase : List[str] = 12
UpperCamelCase : Any = 3
elif deit_name[9:].startswith("small" ):
UpperCamelCase : Tuple = 384
UpperCamelCase : Union[str, Any] = 1536
UpperCamelCase : Optional[Any] = 12
UpperCamelCase : Union[str, Any] = 6
if deit_name[9:].startswith("base" ):
pass
elif deit_name[4:].startswith("large" ):
UpperCamelCase : Dict = 1024
UpperCamelCase : Optional[int] = 4096
UpperCamelCase : str = 24
UpperCamelCase : str = 16
# load original model from timm
UpperCamelCase : str = timm.create_model(_lowerCAmelCase , pretrained=_lowerCAmelCase )
timm_model.eval()
# load state_dict of original model, remove and rename some keys
UpperCamelCase : str = timm_model.state_dict()
UpperCamelCase : Dict = create_rename_keys(_lowerCAmelCase , _lowerCAmelCase )
for src, dest in rename_keys:
rename_key(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
read_in_q_k_v(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
# load HuggingFace model
UpperCamelCase : List[Any] = DeiTForImageClassificationWithTeacher(_lowerCAmelCase ).eval()
model.load_state_dict(_lowerCAmelCase )
# Check outputs on an image, prepared by DeiTImageProcessor
UpperCamelCase : Dict = int(
(256 / 224) * config.image_size ) # to maintain same ratio w.r.t. 224 images, see https://github.com/facebookresearch/deit/blob/ab5715372db8c6cad5740714b2216d55aeae052e/datasets.py#L103
UpperCamelCase : Union[str, Any] = DeiTImageProcessor(size=_lowerCAmelCase , crop_size=config.image_size )
UpperCamelCase : List[str] = image_processor(images=prepare_img() , return_tensors="pt" )
UpperCamelCase : List[Any] = encoding["pixel_values"]
UpperCamelCase : Tuple = model(_lowerCAmelCase )
UpperCamelCase : Union[str, Any] = timm_model(_lowerCAmelCase )
assert timm_logits.shape == outputs.logits.shape
assert torch.allclose(_lowerCAmelCase , outputs.logits , atol=1e-3 )
Path(_lowerCAmelCase ).mkdir(exist_ok=_lowerCAmelCase )
print(F"""Saving model {deit_name} to {pytorch_dump_folder_path}""" )
model.save_pretrained(_lowerCAmelCase )
print(F"""Saving image processor to {pytorch_dump_folder_path}""" )
image_processor.save_pretrained(_lowerCAmelCase )
if __name__ == "__main__":
__lowerCamelCase : str = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--deit_name""",
default="""vit_deit_base_distilled_patch16_224""",
type=str,
help="""Name of the DeiT timm model you'd like to convert.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory."""
)
__lowerCamelCase : List[str] = parser.parse_args()
convert_deit_checkpoint(args.deit_name, args.pytorch_dump_folder_path)
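# Example invocation (illustrative; the script filename and output path are placeholders,
# the model name is the default defined above):
#   python convert_deit_timm_to_pytorch.py \
#       --deit_name vit_deit_base_distilled_patch16_224 \
#       --pytorch_dump_folder_path ./deit-base-distilled-patch16-224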
| 52 |
"""simple docstring"""
from __future__ import annotations
def __lowercase ( _a , _a , _a , ):
if (stress, tangential_force, area).count(0 ) != 1:
raise ValueError('''You cannot supply more or less than 2 values''' )
elif stress < 0:
raise ValueError('''Stress cannot be negative''' )
elif tangential_force < 0:
raise ValueError('''Tangential Force cannot be negative''' )
elif area < 0:
raise ValueError('''Area cannot be negative''' )
elif stress == 0:
return (
"stress",
tangential_force / area,
)
elif tangential_force == 0:
return (
"tangential_force",
stress * area,
)
else:
return (
"area",
tangential_force / stress,
)
if __name__ == "__main__":
import doctest
doctest.testmod()
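# --- Illustrative restatement (added; the numeric values are invented). The branches above
# solve shear stress = tangential_force / area for whichever quantity is passed as 0:
def _demo_shear(stress, tangential_force, area):
    if stress == 0:
        return ("stress", tangential_force / area)
    if tangential_force == 0:
        return ("tangential_force", stress * area)
    return ("area", tangential_force / stress)

assert _demo_shear(0, 100, 5) == ("stress", 20.0)
assert _demo_shear(25, 0, 4) == ("tangential_force", 100)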
| 264 | 0 |
'''simple docstring'''
import os
from collections import deque
import torch
from torch.utils.data import Dataset
class snake_case ( __lowerCamelCase ):
"""simple docstring"""
def __init__( self : Union[str, Any] , __A : List[str]="" , __A : List[Any]="train" ):
assert os.path.isdir(__A )
__UpperCamelCase = []
__UpperCamelCase = os.listdir(__A )
for story_filename in story_filenames_list:
if "summary" in story_filename:
continue
__UpperCamelCase = os.path.join(__A , __A )
if not os.path.isfile(__A ):
continue
self.documents.append(__A )
def __len__( self : Dict ):
return len(self.documents )
def __getitem__( self : Optional[int] , __A : Optional[int] ):
__UpperCamelCase = self.documents[idx]
__UpperCamelCase = document_path.split('/' )[-1]
with open(__A , encoding='utf-8' ) as source:
__UpperCamelCase = source.read()
__UpperCamelCase , __UpperCamelCase = process_story(__A )
return document_name, story_lines, summary_lines
def lowercase__ ( __lowercase : Optional[int] ) -> Optional[Any]:
"""simple docstring"""
__UpperCamelCase = list(filter(lambda __lowercase : len(__lowercase ) != 0 , [line.strip() for line in raw_story.split('\n' )] ) )
# for some unknown reason some lines miss a period, add it
__UpperCamelCase = [_add_missing_period(__lowercase ) for line in nonempty_lines]
# gather article lines
__UpperCamelCase = []
__UpperCamelCase = deque(__lowercase )
while True:
try:
__UpperCamelCase = lines.popleft()
if element.startswith('@highlight' ):
break
story_lines.append(__lowercase )
except IndexError:
# if "@highlight" is absent from the file we pop
# all elements until there is None, raising an exception.
return story_lines, []
# gather summary lines
__UpperCamelCase = list(filter(lambda __lowercase : not t.startswith('@highlight' ) , __lowercase ) )
return story_lines, summary_lines
def lowercase__ ( __lowercase : int ) -> List[str]:
"""simple docstring"""
__UpperCamelCase = ['.', '!', '?', '...', '\'', '`', '"', '\u2019', '\u201d', ')']
if line.startswith('@highlight' ):
return line
if line[-1] in END_TOKENS:
return line
return line + "."
def lowercase__ ( __lowercase : str , __lowercase : List[Any] , __lowercase : Union[str, Any] ) -> Dict:
"""simple docstring"""
if len(__lowercase ) > block_size:
return sequence[:block_size]
else:
sequence.extend([pad_token_id] * (block_size - len(__lowercase )) )
return sequence
def lowercase__ ( __lowercase : List[Any] , __lowercase : int ) -> Optional[Any]:
"""simple docstring"""
__UpperCamelCase = torch.ones_like(__lowercase )
__UpperCamelCase = sequence == pad_token_id
__UpperCamelCase = 0
return mask
def lowercase__ ( __lowercase : Optional[int] , __lowercase : int , __lowercase : Union[str, Any] ) -> Optional[int]:
"""simple docstring"""
__UpperCamelCase = [tokenizer.encode(__lowercase ) for line in story_lines]
__UpperCamelCase = [token for sentence in story_lines_token_ids for token in sentence]
__UpperCamelCase = [tokenizer.encode(__lowercase ) for line in summary_lines]
__UpperCamelCase = [token for sentence in summary_lines_token_ids for token in sentence]
return story_token_ids, summary_token_ids
def lowercase__ ( __lowercase : Dict , __lowercase : Union[str, Any] ) -> str:
"""simple docstring"""
__UpperCamelCase = []
for sequence in batch:
__UpperCamelCase = -1
__UpperCamelCase = []
for s in sequence:
if s == separator_token_id:
sentence_num += 1
embeddings.append(sentence_num % 2 )
batch_embeddings.append(__lowercase )
return torch.tensor(__lowercase )
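# --- Illustrative example (added; the tensor values are invented). The mask builder above
# marks real tokens with 1 and pad positions with 0:
import torch

_seq = torch.tensor([5, 6, 0, 0])
_mask = torch.ones_like(_seq)
_mask[_seq == 0] = 0  # pad_token_id assumed to be 0 for this demo
assert _mask.tolist() == [1, 1, 0, 0]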
| 53 |
"""simple docstring"""
from functools import lru_cache
@lru_cache
def __lowercase ( _a ):
if num < 0:
raise ValueError('''Number should not be negative.''' )
return 1 if num in (0, 1) else num * factorial(num - 1 )
if __name__ == "__main__":
import doctest
doctest.testmod()
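# --- Quick check (added): with lru_cache each value of the recursion is computed once,
# e.g. 5! = 5 * 4 * 3 * 2 * 1 = 120.
from functools import lru_cache

@lru_cache
def _demo_factorial(n: int) -> int:
    return 1 if n in (0, 1) else n * _demo_factorial(n - 1)

assert _demo_factorial(5) == 120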
| 264 | 0 |
"""simple docstring"""
a__ : Optional[int] = '''
# Transformers installation
! pip install transformers datasets
# To install from source instead of the last release, comment the command above and uncomment the following one.
# ! pip install git+https://github.com/huggingface/transformers.git
'''
a__ : Tuple = [{'''type''': '''code''', '''content''': INSTALL_CONTENT}]
a__ : List[str] = {
'''{processor_class}''': '''FakeProcessorClass''',
'''{model_class}''': '''FakeModelClass''',
'''{object_class}''': '''FakeObjectClass''',
}
| 54 |
"""simple docstring"""
import sys
lowercase__ : Dict = (
'''73167176531330624919225119674426574742355349194934'''
'''96983520312774506326239578318016984801869478851843'''
'''85861560789112949495459501737958331952853208805511'''
'''12540698747158523863050715693290963295227443043557'''
'''66896648950445244523161731856403098711121722383113'''
'''62229893423380308135336276614282806444486645238749'''
'''30358907296290491560440772390713810515859307960866'''
'''70172427121883998797908792274921901699720888093776'''
'''65727333001053367881220235421809751254540594752243'''
'''52584907711670556013604839586446706324415722155397'''
'''53697817977846174064955149290862569321978468622482'''
'''83972241375657056057490261407972968652414535100474'''
'''82166370484403199890008895243450658541227588666881'''
'''16427171479924442928230863465674813919123162824586'''
'''17866458359124566529476545682848912883142607690042'''
'''24219022671055626321111109370544217506941658960408'''
'''07198403850962455444362981230987879927244284909188'''
'''84580156166097919133875499200524063689912560717606'''
'''05886116467109405077541002256983155200055935729725'''
'''71636269561882670428252483600823257530420752963450'''
)
def __lowercase ( _a ):
snake_case_ : List[Any] = 1
for digit in s:
product *= int(_a )
return product
def __lowercase ( _a = N ):
snake_case_ : Optional[int] = -sys.maxsize - 1
snake_case_ : str = n[:13]
snake_case_ : List[Any] = 13
while cur_index < len(_a ) - 13:
if int(n[cur_index] ) >= int(substr[0] ):
snake_case_ : int = substr[1:] + n[cur_index]
cur_index += 1
else:
snake_case_ : Optional[Any] = max(_a , str_eval(_a ) )
snake_case_ : Any = n[cur_index : cur_index + 13]
cur_index += 13
return largest_product
if __name__ == "__main__":
print(f'{solution() = }')
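# Worked example (added, from the Project Euler 8 statement): the product-of-digits helper
# gives str_eval("9989") = 9 * 9 * 8 * 9 = 5832, the known maximum over 4-digit windows of
# this series; solution() performs the same scan with 13-digit windows.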
| 264 | 0 |
'''simple docstring'''
import argparse
import torch
from transformers import LxmertConfig, LxmertForPreTraining, load_tf_weights_in_lxmert
from transformers.utils import logging
logging.set_verbosity_info()
def __snake_case ( UpperCAmelCase_ : Dict , UpperCAmelCase_ : Optional[Any] , UpperCAmelCase_ : List[str] ):
# Initialise PyTorch model
lowerCamelCase_ = LxmertConfig.from_json_file(UpperCAmelCase_ )
print(F'''Building PyTorch model from configuration: {config}''' )
lowerCamelCase_ = LxmertForPreTraining(UpperCAmelCase_ )
# Load weights from tf checkpoint
load_tf_weights_in_lxmert(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ )
# Save pytorch-model
print(F'''Save PyTorch model to {pytorch_dump_path}''' )
torch.save(model.state_dict() , UpperCAmelCase_ )
if __name__ == "__main__":
a_ : Any = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--tf_checkpoint_path""", default=None, type=str, required=True, help="""Path to the TensorFlow checkpoint path."""
)
parser.add_argument(
"""--config_file""",
default=None,
type=str,
required=True,
help="""The config json file corresponding to the pre-trained model. \nThis specifies the model architecture.""",
)
parser.add_argument(
"""--pytorch_dump_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
a_ : Dict = parser.parse_args()
convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.config_file, args.pytorch_dump_path)
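# Example invocation (illustrative; the script filename and all paths are placeholders):
#   python convert_lxmert_original_tf_checkpoint_to_pytorch.py \
#       --tf_checkpoint_path ./lxmert_ckpt \
#       --config_file ./lxmert_config.json \
#       --pytorch_dump_path ./pytorch_model.bin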
| 55 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
lowercase__ : List[Any] = {
'''configuration_distilbert''': [
'''DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''DistilBertConfig''',
'''DistilBertOnnxConfig''',
],
'''tokenization_distilbert''': ['''DistilBertTokenizer'''],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase__ : Any = ['''DistilBertTokenizerFast''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase__ : int = [
'''DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''DistilBertForMaskedLM''',
'''DistilBertForMultipleChoice''',
'''DistilBertForQuestionAnswering''',
'''DistilBertForSequenceClassification''',
'''DistilBertForTokenClassification''',
'''DistilBertModel''',
'''DistilBertPreTrainedModel''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase__ : Dict = [
'''TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFDistilBertForMaskedLM''',
'''TFDistilBertForMultipleChoice''',
'''TFDistilBertForQuestionAnswering''',
'''TFDistilBertForSequenceClassification''',
'''TFDistilBertForTokenClassification''',
'''TFDistilBertMainLayer''',
'''TFDistilBertModel''',
'''TFDistilBertPreTrainedModel''',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase__ : Tuple = [
'''FlaxDistilBertForMaskedLM''',
'''FlaxDistilBertForMultipleChoice''',
'''FlaxDistilBertForQuestionAnswering''',
'''FlaxDistilBertForSequenceClassification''',
'''FlaxDistilBertForTokenClassification''',
'''FlaxDistilBertModel''',
'''FlaxDistilBertPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_distilbert import (
DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
DistilBertConfig,
DistilBertOnnxConfig,
)
from .tokenization_distilbert import DistilBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_distilbert_fast import DistilBertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_distilbert import (
DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
DistilBertForMaskedLM,
DistilBertForMultipleChoice,
DistilBertForQuestionAnswering,
DistilBertForSequenceClassification,
DistilBertForTokenClassification,
DistilBertModel,
DistilBertPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_distilbert import (
TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFDistilBertForMaskedLM,
TFDistilBertForMultipleChoice,
TFDistilBertForQuestionAnswering,
TFDistilBertForSequenceClassification,
TFDistilBertForTokenClassification,
TFDistilBertMainLayer,
TFDistilBertModel,
TFDistilBertPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_distilbert import (
FlaxDistilBertForMaskedLM,
FlaxDistilBertForMultipleChoice,
FlaxDistilBertForQuestionAnswering,
FlaxDistilBertForSequenceClassification,
FlaxDistilBertForTokenClassification,
FlaxDistilBertModel,
FlaxDistilBertPreTrainedModel,
)
else:
import sys
lowercase__ : Union[str, Any] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
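# Note (added for clarity): _LazyModule defers the heavy framework imports until an attribute
# is first accessed, while the TYPE_CHECKING branch above keeps static type checkers aware of
# the real symbols.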
| 264 | 0 |
'''simple docstring'''
import multiprocessing
from typing import TYPE_CHECKING, Optional, Union
from .. import Dataset, Features, config
from ..formatting import query_table
from ..packaged_modules.sql.sql import Sql
from ..utils import logging
from .abc import AbstractDatasetInputStream
if TYPE_CHECKING:
import sqlitea
import sqlalchemy
class a ( _lowerCamelCase ):
def __init__( self : str , lowercase_ : Union[str, "sqlalchemy.sql.Selectable"] , lowercase_ : Union[str, "sqlalchemy.engine.Connection", "sqlalchemy.engine.Engine", "sqlite3.Connection"] , lowercase_ : Optional[Features] = None , lowercase_ : str = None , lowercase_ : bool = False , **lowercase_ : Optional[int] , ):
super().__init__(features=lowercase_ , cache_dir=lowercase_ , keep_in_memory=lowercase_ , **lowercase_ )
snake_case_ = Sql(
cache_dir=lowercase_ , features=lowercase_ , sql=lowercase_ , con=lowercase_ , **lowercase_ , )
def A_ ( self : List[Any] ):
snake_case_ = None
snake_case_ = None
snake_case_ = None
snake_case_ = None
self.builder.download_and_prepare(
download_config=lowercase_ , download_mode=lowercase_ , verification_mode=lowercase_ , base_path=lowercase_ , )
# Build dataset for splits
snake_case_ = self.builder.as_dataset(
split='''train''' , verification_mode=lowercase_ , in_memory=self.keep_in_memory )
return dataset
class a :
def __init__( self : Tuple , lowercase_ : Dataset , lowercase_ : str , lowercase_ : Union[str, "sqlalchemy.engine.Connection", "sqlalchemy.engine.Engine", "sqlite3.Connection"] , lowercase_ : Optional[int] = None , lowercase_ : Optional[int] = None , **lowercase_ : Optional[Any] , ):
if num_proc is not None and num_proc <= 0:
raise ValueError(F"num_proc {num_proc} must be an integer > 0." )
snake_case_ = dataset
snake_case_ = name
snake_case_ = con
snake_case_ = batch_size if batch_size else config.DEFAULT_MAX_BATCH_SIZE
snake_case_ = num_proc
snake_case_ = to_sql_kwargs
def A_ ( self : str ):
snake_case_ = self.to_sql_kwargs.pop('''sql''' , lowercase_ )
snake_case_ = self.to_sql_kwargs.pop('''con''' , lowercase_ )
snake_case_ = self.to_sql_kwargs.pop('''index''' , lowercase_ )
snake_case_ = self._write(index=lowercase_ , **self.to_sql_kwargs )
return written
def A_ ( self : List[str] , lowercase_ : Any ):
snake_case_ ,snake_case_ ,snake_case_ = args
snake_case_ = {**to_sql_kwargs, '''if_exists''': '''append'''} if offset > 0 else to_sql_kwargs
snake_case_ = query_table(
table=self.dataset.data , key=slice(lowercase_ , offset + self.batch_size ) , indices=self.dataset._indices , )
snake_case_ = batch.to_pandas()
snake_case_ = df.to_sql(self.name , self.con , index=lowercase_ , **lowercase_ )
return num_rows or len(lowercase_ )
def A_ ( self : Optional[int] , lowercase_ : Optional[int] , **lowercase_ : Union[str, Any] ):
snake_case_ = 0
if self.num_proc is None or self.num_proc == 1:
for offset in logging.tqdm(
range(0 , len(self.dataset ) , self.batch_size ) , unit='''ba''' , disable=not logging.is_progress_bar_enabled() , desc='''Creating SQL from Arrow format''' , ):
written += self._batch_sql((offset, index, to_sql_kwargs) )
else:
snake_case_ ,snake_case_ = len(self.dataset ), self.batch_size
with multiprocessing.Pool(self.num_proc ) as pool:
for num_rows in logging.tqdm(
pool.imap(
self._batch_sql , [(offset, index, to_sql_kwargs) for offset in range(0 , lowercase_ , lowercase_ )] , ) , total=(num_rows // batch_size) + 1 if num_rows % batch_size else num_rows // batch_size , unit='''ba''' , disable=not logging.is_progress_bar_enabled() , desc='''Creating SQL from Arrow format''' , ):
written += num_rows
return written
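# --- Hedged usage sketch (added; the table name and SQLite URI are invented, and
# sqlalchemy is assumed to be installed). These classes back the public
# Dataset.to_sql / Dataset.from_sql round trip:
from datasets import Dataset

_ds = Dataset.from_dict({"text": ["a", "b"], "label": [0, 1]})
_ds.to_sql("demo_table", "sqlite:///demo.db")
_ds2 = Dataset.from_sql("demo_table", "sqlite:///demo.db")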
| 56 |
"""simple docstring"""
import argparse
import json
from pathlib import Path
import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from timm.data import resolve_data_config
from timm.data.transforms_factory import create_transform
from transformers import (
BitConfig,
ViTHybridConfig,
ViTHybridForImageClassification,
ViTHybridImageProcessor,
ViTHybridModel,
)
from transformers.image_utils import PILImageResampling
from transformers.utils import logging
logging.set_verbosity_info()
lowercase__ : Dict = logging.get_logger(__name__)
def __lowercase ( _a , _a=False ):
snake_case_ : List[str] = []
# fmt: off
# stem:
rename_keys.append(('''cls_token''', '''vit.embeddings.cls_token''') )
rename_keys.append(('''pos_embed''', '''vit.embeddings.position_embeddings''') )
rename_keys.append(('''patch_embed.proj.weight''', '''vit.embeddings.patch_embeddings.projection.weight''') )
rename_keys.append(('''patch_embed.proj.bias''', '''vit.embeddings.patch_embeddings.projection.bias''') )
# backbone
rename_keys.append(('''patch_embed.backbone.stem.conv.weight''', '''vit.embeddings.patch_embeddings.backbone.bit.embedder.convolution.weight''') )
rename_keys.append(('''patch_embed.backbone.stem.norm.weight''', '''vit.embeddings.patch_embeddings.backbone.bit.embedder.norm.weight''') )
rename_keys.append(('''patch_embed.backbone.stem.norm.bias''', '''vit.embeddings.patch_embeddings.backbone.bit.embedder.norm.bias''') )
for stage_idx in range(len(config.backbone_config.depths ) ):
for layer_idx in range(config.backbone_config.depths[stage_idx] ):
rename_keys.append((f"patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.conv1.weight", f"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.conv1.weight") )
rename_keys.append((f"patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm1.weight", f"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm1.weight") )
rename_keys.append((f"patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm1.bias", f"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm1.bias") )
rename_keys.append((f"patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.conv2.weight", f"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.conv2.weight") )
rename_keys.append((f"patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm2.weight", f"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm2.weight") )
rename_keys.append((f"patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm2.bias", f"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm2.bias") )
rename_keys.append((f"patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.conv3.weight", f"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.conv3.weight") )
rename_keys.append((f"patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm3.weight", f"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm3.weight") )
rename_keys.append((f"patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm3.bias", f"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm3.bias") )
rename_keys.append((f"patch_embed.backbone.stages.{stage_idx}.blocks.0.downsample.conv.weight", f"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.0.downsample.conv.weight") )
rename_keys.append((f"patch_embed.backbone.stages.{stage_idx}.blocks.0.downsample.norm.weight", f"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.0.downsample.norm.weight") )
rename_keys.append((f"patch_embed.backbone.stages.{stage_idx}.blocks.0.downsample.norm.bias", f"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.0.downsample.norm.bias") )
# transformer encoder
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((f"blocks.{i}.norm1.weight", f"vit.encoder.layer.{i}.layernorm_before.weight") )
rename_keys.append((f"blocks.{i}.norm1.bias", f"vit.encoder.layer.{i}.layernorm_before.bias") )
rename_keys.append((f"blocks.{i}.attn.proj.weight", f"vit.encoder.layer.{i}.attention.output.dense.weight") )
rename_keys.append((f"blocks.{i}.attn.proj.bias", f"vit.encoder.layer.{i}.attention.output.dense.bias") )
rename_keys.append((f"blocks.{i}.norm2.weight", f"vit.encoder.layer.{i}.layernorm_after.weight") )
rename_keys.append((f"blocks.{i}.norm2.bias", f"vit.encoder.layer.{i}.layernorm_after.bias") )
rename_keys.append((f"blocks.{i}.mlp.fc1.weight", f"vit.encoder.layer.{i}.intermediate.dense.weight") )
rename_keys.append((f"blocks.{i}.mlp.fc1.bias", f"vit.encoder.layer.{i}.intermediate.dense.bias") )
rename_keys.append((f"blocks.{i}.mlp.fc2.weight", f"vit.encoder.layer.{i}.output.dense.weight") )
rename_keys.append((f"blocks.{i}.mlp.fc2.bias", f"vit.encoder.layer.{i}.output.dense.bias") )
if base_model:
# layernorm + pooler
rename_keys.extend(
[
('''norm.weight''', '''layernorm.weight'''),
('''norm.bias''', '''layernorm.bias'''),
('''pre_logits.fc.weight''', '''pooler.dense.weight'''),
('''pre_logits.fc.bias''', '''pooler.dense.bias'''),
] )
# if just the base model, we should remove "vit" from all keys that start with "vit"
snake_case_ : Optional[int] = [(pair[0], pair[1][4:]) if pair[1].startswith('''vit''' ) else pair for pair in rename_keys]
else:
# layernorm + classification head
rename_keys.extend(
[
('''norm.weight''', '''vit.layernorm.weight'''),
('''norm.bias''', '''vit.layernorm.bias'''),
('''head.weight''', '''classifier.weight'''),
('''head.bias''', '''classifier.bias'''),
] )
# fmt: on
return rename_keys
def __lowercase ( _a , _a , _a=False ):
for i in range(config.num_hidden_layers ):
if base_model:
snake_case_ : List[str] = ''''''
else:
snake_case_ : Dict = '''vit.'''
# read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
snake_case_ : List[str] = state_dict.pop(f"blocks.{i}.attn.qkv.weight" )
snake_case_ : Optional[int] = state_dict.pop(f"blocks.{i}.attn.qkv.bias" )
# next, add query, keys and values (in that order) to the state dict
snake_case_ : Any = in_proj_weight[
: config.hidden_size, :
]
snake_case_ : Dict = in_proj_bias[: config.hidden_size]
snake_case_ : str = in_proj_weight[
config.hidden_size : config.hidden_size * 2, :
]
snake_case_ : Optional[int] = in_proj_bias[
config.hidden_size : config.hidden_size * 2
]
snake_case_ : Dict = in_proj_weight[
-config.hidden_size :, :
]
snake_case_ : str = in_proj_bias[-config.hidden_size :]
def __lowercase ( _a ):
snake_case_ : Dict = ['''head.weight''', '''head.bias''']
for k in ignore_keys:
state_dict.pop(_a , _a )
def __lowercase ( _a , _a , _a ):
snake_case_ : Union[str, Any] = dct.pop(_a )
snake_case_ : Union[str, Any] = val
def __lowercase ( ):
snake_case_ : Any = '''http://images.cocodataset.org/val2017/000000039769.jpg'''
snake_case_ : Tuple = Image.open(requests.get(_a , stream=_a ).raw )
return im
@torch.no_grad()
def __lowercase ( _a , _a , _a=False ):
snake_case_ : str = BitConfig(
global_padding='''same''' , layer_type='''bottleneck''' , depths=(3, 4, 9) , out_features=['''stage3'''] , embedding_dynamic_padding=_a , )
snake_case_ : Tuple = ViTHybridConfig(backbone_config=_a , image_size=384 , num_labels=1_000 )
snake_case_ : int = False
# load original model from timm
snake_case_ : str = timm.create_model(_a , pretrained=_a )
timm_model.eval()
# load state_dict of original model, remove and rename some keys
snake_case_ : Any = timm_model.state_dict()
if base_model:
remove_classification_head_(_a )
snake_case_ : int = create_rename_keys(_a , _a )
for src, dest in rename_keys:
rename_key(_a , _a , _a )
read_in_q_k_v(_a , _a , _a )
snake_case_ : Optional[Any] = '''huggingface/label-files'''
snake_case_ : Any = '''imagenet-1k-id2label.json'''
snake_case_ : Dict = json.load(open(hf_hub_download(_a , _a , repo_type='''dataset''' ) , '''r''' ) )
snake_case_ : Dict = {int(_a ): v for k, v in idalabel.items()}
snake_case_ : Optional[int] = idalabel
snake_case_ : Optional[Any] = {v: k for k, v in idalabel.items()}
# load HuggingFace model
if vit_name[-5:] == "in21k":
snake_case_ : Optional[Any] = ViTHybridModel(_a ).eval()
else:
snake_case_ : Any = ViTHybridForImageClassification(_a ).eval()
model.load_state_dict(_a )
# create image processor
snake_case_ : Optional[Any] = create_transform(**resolve_data_config({} , model=_a ) )
snake_case_ : List[Any] = transform.transforms
snake_case_ : Optional[Any] = {
'''bilinear''': PILImageResampling.BILINEAR,
'''bicubic''': PILImageResampling.BICUBIC,
'''nearest''': PILImageResampling.NEAREST,
}
snake_case_ : List[Any] = ViTHybridImageProcessor(
do_resize=_a , size={'''shortest_edge''': timm_transforms[0].size} , resample=pillow_resamplings[timm_transforms[0].interpolation.value] , do_center_crop=_a , crop_size={'''height''': timm_transforms[1].size[0], '''width''': timm_transforms[1].size[1]} , do_normalize=_a , image_mean=timm_transforms[-1].mean.tolist() , image_std=timm_transforms[-1].std.tolist() , )
snake_case_ : Optional[int] = prepare_img()
snake_case_ : Optional[int] = transform(_a ).unsqueeze(0 )
snake_case_ : int = processor(_a , return_tensors='''pt''' ).pixel_values
# verify pixel values
assert torch.allclose(_a , _a )
# verify logits
with torch.no_grad():
snake_case_ : List[str] = model(_a )
snake_case_ : Any = outputs.logits
print('''Predicted class:''' , logits.argmax(-1 ).item() )
if base_model:
snake_case_ : Optional[Any] = timm_model.forward_features(_a )
assert timm_pooled_output.shape == outputs.pooler_output.shape
assert torch.allclose(_a , outputs.pooler_output , atol=1E-3 )
else:
snake_case_ : int = timm_model(_a )
assert timm_logits.shape == outputs.logits.shape
assert torch.allclose(_a , outputs.logits , atol=1E-3 )
print('''Looks ok!''' )
if pytorch_dump_folder_path is not None:
Path(_a ).mkdir(exist_ok=_a )
print(f"Saving model {vit_name} to {pytorch_dump_folder_path}" )
model.save_pretrained(_a )
print(f"Saving processor to {pytorch_dump_folder_path}" )
processor.save_pretrained(_a )
if push_to_hub:
print(f"Pushing model and processor to the hub {vit_name}" )
model.push_to_hub(f"ybelkada/{vit_name}" )
processor.push_to_hub(f"ybelkada/{vit_name}" )
if __name__ == "__main__":
lowercase__ : int = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--vit_name''',
default='''vit_base_r50_s16_384''',
type=str,
help='''Name of the hybrid ViT timm model you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.'''
)
parser.add_argument(
'''--push_to_hub''', action='''store_true''', help='''Whether to upload the model to the HuggingFace hub.'''
)
lowercase__ : Any = parser.parse_args()
convert_vit_checkpoint(args.vit_name, args.pytorch_dump_folder_path, args.push_to_hub)
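# Example invocation (illustrative; the script filename and output path are placeholders,
# the flags are the ones defined above):
#   python convert_vit_hybrid_timm_to_pytorch.py \
#       --vit_name vit_base_r50_s16_384 \
#       --pytorch_dump_folder_path ./vit-hybrid-base-bit-384 --push_to_hub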
| 264 | 0 |
"""simple docstring"""
import os
def _lowerCamelCase ( ):
'''simple docstring'''
with open(os.path.dirname(_UpperCamelCase ) + "/p022_names.txt" ) as file:
__lowerCAmelCase = str(file.readlines()[0] )
__lowerCAmelCase = names.replace("\"" , "" ).split("," )
names.sort()
__lowerCAmelCase = 0
__lowerCAmelCase = 0
for i, name in enumerate(_UpperCamelCase ):
for letter in name:
name_score += ord(_UpperCamelCase ) - 64
total_score += (i + 1) * name_score
__lowerCAmelCase = 0
return total_score
if __name__ == "__main__":
print(solution())
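# Worked example (added, from the Project Euler 22 statement): COLIN scores
# 3 + 15 + 12 + 9 + 14 = 53, and as the 938th name contributes 938 * 53 = 49714.
assert sum(ord(c) - 64 for c in "COLIN") == 53
assert 938 * 53 == 49714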
| 57 |
"""simple docstring"""
import argparse
import json
import os
import re
import torch
from transformers import BloomConfig, BloomModel
from transformers.file_utils import CONFIG_NAME, WEIGHTS_NAME
from transformers.utils import logging
logging.set_verbosity_info()
lowercase__ : Dict = [
'''word_embeddings_layernorm.weight''',
'''word_embeddings_layernorm.bias''',
'''input_layernorm.weight''',
'''input_layernorm.bias''',
'''post_attention_layernorm.weight''',
'''post_attention_layernorm.bias''',
'''self_attention.dense.bias''',
'''mlp.dense_4h_to_h.bias''',
'''ln_f.weight''',
'''ln_f.bias''',
]
lowercase__ : str = [
'''mlp.dense_4h_to_h.weight''',
'''self_attention.dense.weight''',
]
def __lowercase ( _a , _a ):
snake_case_ : Optional[int] = {
'''word_embeddings.weight''': '''word_embeddings.weight''',
'''word_embeddings.norm.weight''': '''word_embeddings_layernorm.weight''',
'''word_embeddings.norm.bias''': '''word_embeddings_layernorm.bias''',
'''weight''': '''ln_f.weight''',
'''bias''': '''ln_f.bias''',
}
if key in layer_rename_map:
return layer_rename_map[key]
# Handle transformer blocks
snake_case_ : List[Any] = int(re.match(r'''.*layer_(\d*).*''' , _a )[1] )
layer_number -= 3
return f"h.{layer_number}." + key
def __lowercase ( _a ):
if dtype == torch.bool:
return 1 / 8
snake_case_ : Dict = re.search(r'''[^\d](\d+)$''' , str(_a ) )
if bit_search is None:
raise ValueError(f"`dtype` is not a valid dtype: {dtype}." )
snake_case_ : Optional[int] = int(bit_search.groups()[0] )
return bit_size // 8
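# Examples (added for clarity): str(torch.float16) ends in "16", so 16 // 8 = 2 bytes;
# torch.int8 -> 1 byte; torch.bool is special-cased above to 1/8 byte per element.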
def __lowercase ( _a , _a , _a , _a , _a ):
# Construct model
if bloom_config_file == "":
snake_case_ : int = BloomConfig()
else:
snake_case_ : List[str] = BloomConfig.from_json_file(_a )
if shard_model:
snake_case_ : List[str] = os.listdir(_a )
snake_case_ : int = sorted(filter(lambda _a : s.startswith('''layer''' ) and "model_00" in s , _a ) )
snake_case_ : List[str] = {'''weight_map''': {}, '''metadata''': {}}
snake_case_ : Any = 0
snake_case_ : Union[str, Any] = None
snake_case_ : List[str] = BloomConfig()
for j, file in enumerate(_a ):
print('''Processing file: {}'''.format(_a ) )
snake_case_ : Dict = None
for i in range(_a ):
# load all TP files
snake_case_ : Union[str, Any] = file.replace('''model_00''' , f"model_0{i}" )
snake_case_ : List[str] = torch.load(os.path.join(_a , _a ) , map_location='''cpu''' )
# Rename keys in the transformers names
snake_case_ : str = list(temp.keys() )
for key in keys:
snake_case_ : Any = temp.pop(_a )
if tensors is None:
snake_case_ : Any = temp
else:
for key in tensors.keys():
if any(key.endswith(_a ) for end in WEIGHTS_TO_AVERAGE_ENDSWITH ):
# We average (sum and then divide) some weights across TP ranks (see https://github.com/bigscience-workshop/Megatron-DeepSpeed/blob/olruwase/sync_layer_norms/megatron/training.py#L425)
tensors[key] += temp[key]
else:
# Some weights are RowParallelLinear in Megatron-Deepspeed, others are ColumnParallel
snake_case_ : Tuple = 1 if any(text in key for text in WEIGHTS_WITH_ROW_PARALLELISM_CONTAIN ) else 0
# We concatenate these weights across TP ranks
snake_case_ : List[str] = torch.cat([tensors[key], temp[key]] , dim=_a )
# Divide by the number of TP the weights we want to average
for key in tensors.keys():
if any(key.endswith(_a ) for end in WEIGHTS_TO_AVERAGE_ENDSWITH ):
snake_case_ : Any = tensors[key] / pretraining_tp
torch.save(
_a , os.path.join(
_a , '''pytorch_model_{}-of-{}.bin'''.format(str(j + 1 ).zfill(5 ) , str(len(_a ) ).zfill(5 ) ) , ) , )
for key in tensors.keys():
snake_case_ : List[str] = tensors[key]
total_size += value.numel() * get_dtype_size(value.dtype )
if key not in index_dict["weight_map"]:
snake_case_ : List[str] = '''pytorch_model_{}-of-{}.bin'''.format(
str(j + 1 ).zfill(5 ) , str(len(_a ) ).zfill(5 ) )
snake_case_ : int = BloomConfig()
snake_case_ : Any = pytorch_dump_folder_path + '''/''' + CONFIG_NAME
snake_case_ : Dict = total_size
with open(_a , '''w''' , encoding='''utf-8''' ) as f:
f.write(config.to_json_string() )
with open(os.path.join(_a , WEIGHTS_NAME + '''.index.json''' ) , '''w''' , encoding='''utf-8''' ) as f:
snake_case_ : Tuple = json.dumps(_a , indent=2 , sort_keys=_a ) + '''\n'''
f.write(_a )
else:
snake_case_ : Union[str, Any] = BloomModel(_a )
snake_case_ : List[str] = os.listdir(_a )
snake_case_ : Dict = sorted(filter(lambda _a : s.startswith('''layer''' ) and "model_00" in s , _a ) )
snake_case_ : List[Any] = None
for i, file in enumerate(_a ):
snake_case_ : Optional[Any] = None
for i in range(_a ):
# load all TP files
snake_case_ : List[str] = file.replace('''model_00''' , f"model_0{i}" )
snake_case_ : Optional[Any] = torch.load(os.path.join(_a , _a ) , map_location='''cpu''' )
# Rename keys in the transformers names
snake_case_ : str = list(temp.keys() )
for key in keys:
snake_case_ : str = temp.pop(_a )
if tensors is None:
snake_case_ : int = temp
else:
for key in tensors.keys():
# We average (sum and then divide) some weights across TP ranks (see https://github.com/bigscience-workshop/Megatron-DeepSpeed/blob/olruwase/sync_layer_norms/megatron/training.py#L425)
if any(key.endswith(_a ) for end in WEIGHTS_TO_AVERAGE_ENDSWITH ):
tensors[key] += temp[key]
else:
# Some weights are RowParallelLinear in Megatron-Deepspeed, others are ColumnParallel
snake_case_ : Tuple = 1 if any(text in key for text in WEIGHTS_WITH_ROW_PARALLELISM_CONTAIN ) else 0
# We concatenate these weights across TP ranks
snake_case_ : Optional[Any] = torch.cat([tensors[key], temp[key]] , dim=_a )
# Divide by the number of TP the weights we want to average
for key in tensors.keys():
if any(key.endswith(_a ) for end in WEIGHTS_TO_AVERAGE_ENDSWITH ):
snake_case_ : Union[str, Any] = tensors[key] / pretraining_tp
snake_case_ : Any = model.load_state_dict(_a , strict=_a )
assert not other_keys.unexpected_keys, f"The keys {other_keys.unexpected_keys} are unexpected"
if missing_keys is None:
snake_case_ : Optional[int] = set(other_keys.missing_keys )
else:
snake_case_ : Tuple = missing_keys.intersection(set(other_keys.missing_keys ) )
assert not missing_keys, f"The keys {missing_keys} are missing"
# Save pytorch-model
os.makedirs(_a , exist_ok=_a )
snake_case_ : List[str] = pytorch_dump_folder_path + '''/''' + WEIGHTS_NAME
snake_case_ : Optional[Any] = pytorch_dump_folder_path + '''/''' + CONFIG_NAME
print(f"Save PyTorch model to {pytorch_weights_dump_path} with dtype {config.torch_dtype}" )
if config.torch_dtype is not None:
snake_case_ : Optional[Any] = model.to(config.torch_dtype )
torch.save(model.state_dict() , _a )
print(f"Save configuration file to {pytorch_config_dump_path}" )
with open(_a , '''w''' , encoding='''utf-8''' ) as f:
f.write(config.to_json_string() )
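# --- Illustrative sketch (added; the toy tensors are invented). Weights replicated across
# tensor-parallel ranks (e.g. layernorms) are averaged, while sharded projection matrices
# are concatenated along their parallel dimension:
import torch

_rank_a, _rank_b = torch.ones(2, 2), 3 * torch.ones(2, 2)
_averaged = (_rank_a + _rank_b) / 2                   # every entry becomes 2.0
_concatenated = torch.cat([_rank_a, _rank_b], dim=0)  # shape (4, 2)
assert _averaged.mean().item() == 2.0 and _concatenated.shape == (4, 2)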
if __name__ == "__main__":
lowercase__ : str = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--bloom_checkpoint_path''',
default=None,
type=str,
required=True,
help='''Path to the Megatron-LM checkpoint path.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
parser.add_argument(
'''--bloom_config_file''',
default='''''',
type=str,
help=(
'''An optional config json file corresponding to the pre-trained model. \n'''
'''This specifies the model architecture.'''
),
)
parser.add_argument(
'''--shard_model''',
action='''store_true''',
help='''An optional setting to shard the output model \nThis enables sharding the converted checkpoint''',
)
parser.add_argument(
'''--pretraining_tp''',
default=4,
type=int,
help='''Pretraining TP rank that has been used when training the model in Megatron-LM \n''',
)
lowercase__ : List[Any] = parser.parse_args()
convert_bloom_checkpoint_to_pytorch(
args.bloom_checkpoint_path,
args.bloom_config_file,
args.pytorch_dump_folder_path,
args.shard_model,
args.pretraining_tp,
)
| 264 | 0 |
'''simple docstring'''
from collections import OrderedDict
from ...utils import logging
from .auto_factory import _BaseAutoModelClass, _LazyAutoMapping, auto_class_update
from .configuration_auto import CONFIG_MAPPING_NAMES
lowercase_ = logging.get_logger(__name__)
lowercase_ = OrderedDict(
[
# Base model mapping
("""albert""", """FlaxAlbertModel"""),
("""bart""", """FlaxBartModel"""),
("""beit""", """FlaxBeitModel"""),
("""bert""", """FlaxBertModel"""),
("""big_bird""", """FlaxBigBirdModel"""),
("""blenderbot""", """FlaxBlenderbotModel"""),
("""blenderbot-small""", """FlaxBlenderbotSmallModel"""),
("""clip""", """FlaxCLIPModel"""),
("""distilbert""", """FlaxDistilBertModel"""),
("""electra""", """FlaxElectraModel"""),
("""gpt-sw3""", """FlaxGPT2Model"""),
("""gpt2""", """FlaxGPT2Model"""),
("""gpt_neo""", """FlaxGPTNeoModel"""),
("""gptj""", """FlaxGPTJModel"""),
("""longt5""", """FlaxLongT5Model"""),
("""marian""", """FlaxMarianModel"""),
("""mbart""", """FlaxMBartModel"""),
("""mt5""", """FlaxMT5Model"""),
("""opt""", """FlaxOPTModel"""),
("""pegasus""", """FlaxPegasusModel"""),
("""regnet""", """FlaxRegNetModel"""),
("""resnet""", """FlaxResNetModel"""),
("""roberta""", """FlaxRobertaModel"""),
("""roberta-prelayernorm""", """FlaxRobertaPreLayerNormModel"""),
("""roformer""", """FlaxRoFormerModel"""),
("""t5""", """FlaxT5Model"""),
("""vision-text-dual-encoder""", """FlaxVisionTextDualEncoderModel"""),
("""vit""", """FlaxViTModel"""),
("""wav2vec2""", """FlaxWav2Vec2Model"""),
("""whisper""", """FlaxWhisperModel"""),
("""xglm""", """FlaxXGLMModel"""),
("""xlm-roberta""", """FlaxXLMRobertaModel"""),
]
)
lowercase_ = OrderedDict(
[
# Model for pre-training mapping
("""albert""", """FlaxAlbertForPreTraining"""),
("""bart""", """FlaxBartForConditionalGeneration"""),
("""bert""", """FlaxBertForPreTraining"""),
("""big_bird""", """FlaxBigBirdForPreTraining"""),
("""electra""", """FlaxElectraForPreTraining"""),
("""longt5""", """FlaxLongT5ForConditionalGeneration"""),
("""mbart""", """FlaxMBartForConditionalGeneration"""),
("""mt5""", """FlaxMT5ForConditionalGeneration"""),
("""roberta""", """FlaxRobertaForMaskedLM"""),
("""roberta-prelayernorm""", """FlaxRobertaPreLayerNormForMaskedLM"""),
("""roformer""", """FlaxRoFormerForMaskedLM"""),
("""t5""", """FlaxT5ForConditionalGeneration"""),
("""wav2vec2""", """FlaxWav2Vec2ForPreTraining"""),
("""whisper""", """FlaxWhisperForConditionalGeneration"""),
("""xlm-roberta""", """FlaxXLMRobertaForMaskedLM"""),
]
)
lowercase_ = OrderedDict(
[
# Model for Masked LM mapping
("""albert""", """FlaxAlbertForMaskedLM"""),
("""bart""", """FlaxBartForConditionalGeneration"""),
("""bert""", """FlaxBertForMaskedLM"""),
("""big_bird""", """FlaxBigBirdForMaskedLM"""),
("""distilbert""", """FlaxDistilBertForMaskedLM"""),
("""electra""", """FlaxElectraForMaskedLM"""),
("""mbart""", """FlaxMBartForConditionalGeneration"""),
("""roberta""", """FlaxRobertaForMaskedLM"""),
("""roberta-prelayernorm""", """FlaxRobertaPreLayerNormForMaskedLM"""),
("""roformer""", """FlaxRoFormerForMaskedLM"""),
("""xlm-roberta""", """FlaxXLMRobertaForMaskedLM"""),
]
)
lowercase_ = OrderedDict(
[
# Model for Seq2Seq Causal LM mapping
("""bart""", """FlaxBartForConditionalGeneration"""),
("""blenderbot""", """FlaxBlenderbotForConditionalGeneration"""),
("""blenderbot-small""", """FlaxBlenderbotSmallForConditionalGeneration"""),
("""encoder-decoder""", """FlaxEncoderDecoderModel"""),
("""longt5""", """FlaxLongT5ForConditionalGeneration"""),
("""marian""", """FlaxMarianMTModel"""),
("""mbart""", """FlaxMBartForConditionalGeneration"""),
("""mt5""", """FlaxMT5ForConditionalGeneration"""),
("""pegasus""", """FlaxPegasusForConditionalGeneration"""),
("""t5""", """FlaxT5ForConditionalGeneration"""),
]
)
lowercase_ = OrderedDict(
[
# Model for Image classification
("""beit""", """FlaxBeitForImageClassification"""),
("""regnet""", """FlaxRegNetForImageClassification"""),
("""resnet""", """FlaxResNetForImageClassification"""),
("""vit""", """FlaxViTForImageClassification"""),
]
)
lowercase_ = OrderedDict(
[
("""vision-encoder-decoder""", """FlaxVisionEncoderDecoderModel"""),
]
)
lowercase_ = OrderedDict(
[
# Model for Causal LM mapping
("""bart""", """FlaxBartForCausalLM"""),
("""bert""", """FlaxBertForCausalLM"""),
("""big_bird""", """FlaxBigBirdForCausalLM"""),
("""electra""", """FlaxElectraForCausalLM"""),
("""gpt-sw3""", """FlaxGPT2LMHeadModel"""),
("""gpt2""", """FlaxGPT2LMHeadModel"""),
("""gpt_neo""", """FlaxGPTNeoForCausalLM"""),
("""gptj""", """FlaxGPTJForCausalLM"""),
("""opt""", """FlaxOPTForCausalLM"""),
("""roberta""", """FlaxRobertaForCausalLM"""),
("""roberta-prelayernorm""", """FlaxRobertaPreLayerNormForCausalLM"""),
("""xglm""", """FlaxXGLMForCausalLM"""),
("""xlm-roberta""", """FlaxXLMRobertaForCausalLM"""),
]
)
FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES = OrderedDict(
[
# Model for Sequence Classification mapping
("""albert""", """FlaxAlbertForSequenceClassification"""),
("""bart""", """FlaxBartForSequenceClassification"""),
("""bert""", """FlaxBertForSequenceClassification"""),
("""big_bird""", """FlaxBigBirdForSequenceClassification"""),
("""distilbert""", """FlaxDistilBertForSequenceClassification"""),
("""electra""", """FlaxElectraForSequenceClassification"""),
("""mbart""", """FlaxMBartForSequenceClassification"""),
("""roberta""", """FlaxRobertaForSequenceClassification"""),
("""roberta-prelayernorm""", """FlaxRobertaPreLayerNormForSequenceClassification"""),
("""roformer""", """FlaxRoFormerForSequenceClassification"""),
("""xlm-roberta""", """FlaxXLMRobertaForSequenceClassification"""),
]
)
FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES = OrderedDict(
[
# Model for Question Answering mapping
("""albert""", """FlaxAlbertForQuestionAnswering"""),
("""bart""", """FlaxBartForQuestionAnswering"""),
("""bert""", """FlaxBertForQuestionAnswering"""),
("""big_bird""", """FlaxBigBirdForQuestionAnswering"""),
("""distilbert""", """FlaxDistilBertForQuestionAnswering"""),
("""electra""", """FlaxElectraForQuestionAnswering"""),
("""mbart""", """FlaxMBartForQuestionAnswering"""),
("""roberta""", """FlaxRobertaForQuestionAnswering"""),
("""roberta-prelayernorm""", """FlaxRobertaPreLayerNormForQuestionAnswering"""),
("""roformer""", """FlaxRoFormerForQuestionAnswering"""),
("""xlm-roberta""", """FlaxXLMRobertaForQuestionAnswering"""),
]
)
FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES = OrderedDict(
[
# Model for Token Classification mapping
("""albert""", """FlaxAlbertForTokenClassification"""),
("""bert""", """FlaxBertForTokenClassification"""),
("""big_bird""", """FlaxBigBirdForTokenClassification"""),
("""distilbert""", """FlaxDistilBertForTokenClassification"""),
("""electra""", """FlaxElectraForTokenClassification"""),
("""roberta""", """FlaxRobertaForTokenClassification"""),
("""roberta-prelayernorm""", """FlaxRobertaPreLayerNormForTokenClassification"""),
("""roformer""", """FlaxRoFormerForTokenClassification"""),
("""xlm-roberta""", """FlaxXLMRobertaForTokenClassification"""),
]
)
FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES = OrderedDict(
[
# Model for Multiple Choice mapping
("""albert""", """FlaxAlbertForMultipleChoice"""),
("""bert""", """FlaxBertForMultipleChoice"""),
("""big_bird""", """FlaxBigBirdForMultipleChoice"""),
("""distilbert""", """FlaxDistilBertForMultipleChoice"""),
("""electra""", """FlaxElectraForMultipleChoice"""),
("""roberta""", """FlaxRobertaForMultipleChoice"""),
("""roberta-prelayernorm""", """FlaxRobertaPreLayerNormForMultipleChoice"""),
("""roformer""", """FlaxRoFormerForMultipleChoice"""),
("""xlm-roberta""", """FlaxXLMRobertaForMultipleChoice"""),
]
)
FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING_NAMES = OrderedDict(
[
("""bert""", """FlaxBertForNextSentencePrediction"""),
]
)
FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING_NAMES = OrderedDict(
[
("""speech-encoder-decoder""", """FlaxSpeechEncoderDecoderModel"""),
("""whisper""", """FlaxWhisperForConditionalGeneration"""),
]
)
FLAX_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES = OrderedDict(
[
("""whisper""", """FlaxWhisperForAudioClassification"""),
]
)
FLAX_MODEL_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_MAPPING_NAMES)
FLAX_MODEL_FOR_PRETRAINING_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_PRETRAINING_MAPPING_NAMES)
FLAX_MODEL_FOR_MASKED_LM_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_MASKED_LM_MAPPING_NAMES)
FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES
)
FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES
)
FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING_NAMES)
FLAX_MODEL_FOR_CAUSAL_LM_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_CAUSAL_LM_MAPPING_NAMES)
FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES
)
FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES
)
FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES
)
FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES
)
FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING_NAMES
)
FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING_NAMES
)
FLAX_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES
)
class FlaxAutoModel(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_MAPPING


FlaxAutoModel = auto_class_update(FlaxAutoModel)


class FlaxAutoModelForPreTraining(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_PRETRAINING_MAPPING


FlaxAutoModelForPreTraining = auto_class_update(FlaxAutoModelForPreTraining, head_doc="pretraining")


class FlaxAutoModelForCausalLM(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_CAUSAL_LM_MAPPING


FlaxAutoModelForCausalLM = auto_class_update(FlaxAutoModelForCausalLM, head_doc="causal language modeling")


class FlaxAutoModelForMaskedLM(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_MASKED_LM_MAPPING


FlaxAutoModelForMaskedLM = auto_class_update(FlaxAutoModelForMaskedLM, head_doc="masked language modeling")


class FlaxAutoModelForSeq2SeqLM(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING


FlaxAutoModelForSeq2SeqLM = auto_class_update(
    FlaxAutoModelForSeq2SeqLM, head_doc="sequence-to-sequence language modeling", checkpoint_for_example="t5-base"
)


class FlaxAutoModelForSequenceClassification(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING


FlaxAutoModelForSequenceClassification = auto_class_update(
    FlaxAutoModelForSequenceClassification, head_doc="sequence classification"
)


class FlaxAutoModelForQuestionAnswering(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING


FlaxAutoModelForQuestionAnswering = auto_class_update(FlaxAutoModelForQuestionAnswering, head_doc="question answering")


class FlaxAutoModelForTokenClassification(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING


FlaxAutoModelForTokenClassification = auto_class_update(
    FlaxAutoModelForTokenClassification, head_doc="token classification"
)


class FlaxAutoModelForMultipleChoice(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING


FlaxAutoModelForMultipleChoice = auto_class_update(FlaxAutoModelForMultipleChoice, head_doc="multiple choice")


class FlaxAutoModelForNextSentencePrediction(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING


FlaxAutoModelForNextSentencePrediction = auto_class_update(
    FlaxAutoModelForNextSentencePrediction, head_doc="next sentence prediction"
)


class FlaxAutoModelForImageClassification(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING


FlaxAutoModelForImageClassification = auto_class_update(
    FlaxAutoModelForImageClassification, head_doc="image classification"
)


class FlaxAutoModelForVision2Seq(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING


FlaxAutoModelForVision2Seq = auto_class_update(FlaxAutoModelForVision2Seq, head_doc="vision-to-text modeling")


class FlaxAutoModelForSpeechSeq2Seq(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING


FlaxAutoModelForSpeechSeq2Seq = auto_class_update(
    FlaxAutoModelForSpeechSeq2Seq, head_doc="sequence-to-sequence speech-to-text modeling"
)
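# Usage sketch (added for illustration, not part of the original module): the
# generated Flax auto classes mirror the PyTorch ones. "bert-base-cased" is
# only an example checkpoint name, and downloading it requires network access.
#
#     from transformers import FlaxAutoModel, FlaxAutoModelForMaskedLM
#
#     model = FlaxAutoModel.from_pretrained("bert-base-cased")
#     mlm = FlaxAutoModelForMaskedLM.from_pretrained("bert-base-cased")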
| 58 |
"""simple docstring"""
def jaccard_similarity(set_a, set_b, alternative_union=False):
    if isinstance(set_a, set) and isinstance(set_b, set):
        intersection = len(set_a.intersection(set_b))

        if alternative_union:
            union = len(set_a) + len(set_b)
        else:
            union = len(set_a.union(set_b))

        return intersection / union

    if isinstance(set_a, (list, tuple)) and isinstance(set_b, (list, tuple)):
        intersection = [element for element in set_a if element in set_b]

        if alternative_union:
            union = len(set_a) + len(set_b)
            return len(intersection) / union
        else:
            union = set_a + [element for element in set_b if element not in set_a]
            return len(intersection) / len(union)

    return None
if __name__ == "__main__":
    set_a = {"a", "b", "c", "d", "e"}
    set_b = {"c", "d", "e", "f", "h", "i"}
print(jaccard_similarity(set_a, set_b))
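    # Worked check (added): the intersection above is {"c", "d", "e"} (3 elements)
    # and the union has 8 elements, so the expected similarity is 3 / 8 = 0.375.
    assert jaccard_similarity(set_a, set_b) == 0.375
    # The list branch builds the union as |A| + elements of B not in A (5 + 3 = 8):
    assert jaccard_similarity(sorted(set_a), sorted(set_b)) == 0.375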
| 264 | 0 |
import torch
from diffusers import DDIMParallelScheduler
from .test_schedulers import SchedulerCommonTest
class DDIMParallelSchedulerTest(SchedulerCommonTest):
    scheduler_classes = (DDIMParallelScheduler,)
    forward_default_kwargs = (("eta", 0.0), ("num_inference_steps", 50))

    def get_scheduler_config(self, **kwargs):
        config = {
            "num_train_timesteps": 1000,
            "beta_start": 0.0001,
            "beta_end": 0.02,
            "beta_schedule": "linear",
            "clip_sample": True,
        }

        config.update(**kwargs)
        return config

    def full_loop(self, **config):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(**config)
        scheduler = scheduler_class(**scheduler_config)

        num_inference_steps, eta = 10, 0.0

        model = self.dummy_model()
        sample = self.dummy_sample_deter

        scheduler.set_timesteps(num_inference_steps)

        for t in scheduler.timesteps:
            residual = model(sample, t)
            sample = scheduler.step(residual, t, sample, eta).prev_sample

        return sample

    def test_timesteps(self):
        for timesteps in [100, 500, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps)

    def test_steps_offset(self):
        for steps_offset in [0, 1]:
            self.check_over_configs(steps_offset=steps_offset)

        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(steps_offset=1)
        scheduler = scheduler_class(**scheduler_config)
        scheduler.set_timesteps(5)
        assert torch.equal(scheduler.timesteps, torch.LongTensor([801, 601, 401, 201, 1]))

    def test_betas(self):
        for beta_start, beta_end in zip([0.0001, 0.001, 0.01, 0.1], [0.002, 0.02, 0.2, 2]):
            self.check_over_configs(beta_start=beta_start, beta_end=beta_end)

    def test_schedules(self):
        for schedule in ["linear", "squaredcos_cap_v2"]:
            self.check_over_configs(beta_schedule=schedule)

    def test_prediction_type(self):
        for prediction_type in ["epsilon", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type)

    def test_clip_sample(self):
        for clip_sample in [True, False]:
            self.check_over_configs(clip_sample=clip_sample)

    def test_timestep_spacing(self):
        for timestep_spacing in ["trailing", "leading"]:
            self.check_over_configs(timestep_spacing=timestep_spacing)

    def test_rescale_betas_zero_snr(self):
        for rescale_betas_zero_snr in [True, False]:
            self.check_over_configs(rescale_betas_zero_snr=rescale_betas_zero_snr)

    def test_thresholding(self):
        self.check_over_configs(thresholding=False)
        for threshold in [0.5, 1.0, 2.0]:
            for prediction_type in ["epsilon", "v_prediction"]:
                self.check_over_configs(
                    thresholding=True,
                    prediction_type=prediction_type,
                    sample_max_value=threshold,
                )

    def test_time_indices(self):
        for t in [1, 10, 49]:
            self.check_over_forward(time_step=t)

    def test_inference_steps(self):
        for t, num_inference_steps in zip([1, 10, 50], [10, 50, 500]):
            self.check_over_forward(time_step=t, num_inference_steps=num_inference_steps)

    def test_eta(self):
        for t, eta in zip([1, 10, 49], [0.0, 0.5, 1.0]):
            self.check_over_forward(time_step=t, eta=eta)

    def test_variance(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        assert torch.sum(torch.abs(scheduler._get_variance(0, 0) - 0.0)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(420, 400) - 0.14771)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(980, 960) - 0.32460)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(0, 0) - 0.0)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(487, 486) - 0.00979)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(999, 998) - 0.02)) < 1e-5

    def test_batch_step_no_noise(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        num_inference_steps, eta = 10, 0.0
        scheduler.set_timesteps(num_inference_steps)

        model = self.dummy_model()
        sample1 = self.dummy_sample_deter
        sample2 = self.dummy_sample_deter + 0.1
        sample3 = self.dummy_sample_deter - 0.1

        per_sample_batch = sample1.shape[0]
        samples = torch.stack([sample1, sample2, sample3], dim=0)
        timesteps = torch.arange(num_inference_steps)[0:3, None].repeat(1, per_sample_batch)

        residual = model(samples.flatten(0, 1), timesteps.flatten(0, 1))
        pred_prev_sample = scheduler.batch_step_no_noise(
            residual, timesteps.flatten(0, 1), samples.flatten(0, 1), eta
        )

        result_sum = torch.sum(torch.abs(pred_prev_sample))
        result_mean = torch.mean(torch.abs(pred_prev_sample))

        assert abs(result_sum.item() - 1147.7904) < 1e-2
        assert abs(result_mean.item() - 0.4982) < 1e-3

    def test_full_loop_no_noise(self):
        sample = self.full_loop()
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 172.0067) < 1e-2
        assert abs(result_mean.item() - 0.223967) < 1e-3

    def test_full_loop_with_v_prediction(self):
        sample = self.full_loop(prediction_type="v_prediction")
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 52.5302) < 1e-2
        assert abs(result_mean.item() - 0.0684) < 1e-3

    def test_full_loop_with_set_alpha_to_one(self):
        # We specify different beta, so that the first alpha is 0.99
        sample = self.full_loop(set_alpha_to_one=True, beta_start=0.01)
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 149.8295) < 1e-2
        assert abs(result_mean.item() - 0.1951) < 1e-3

    def test_full_loop_with_no_set_alpha_to_one(self):
        # We specify different beta, so that the first alpha is 0.99
        sample = self.full_loop(set_alpha_to_one=False, beta_start=0.01)
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 149.0784) < 1e-2
        assert abs(result_mean.item() - 0.1941) < 1e-3
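# Reference note (added): the numbers asserted in test_variance follow from the
# DDIM posterior variance
#     var(t, t_prev) = ((1 - a_prev) / (1 - a_t)) * (1 - a_t / a_prev)
# where a_t is the cumulative alpha product of the linear beta schedule above.
# A standalone sketch (run separately; it only needs torch):
#
#     import torch
#     betas = torch.linspace(0.0001, 0.02, 1000)
#     alphas_cumprod = torch.cumprod(1.0 - betas, dim=0)
#
#     def variance(t, t_prev):
#         a_t, a_prev = alphas_cumprod[t], alphas_cumprod[t_prev]
#         return ((1 - a_prev) / (1 - a_t)) * (1 - a_t / a_prev)
#
#     variance(420, 400)  # ~0.14771, matching the assertion above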
| 59 |
"""simple docstring"""
import os
from datetime import datetime as dt
from github import Github
LABELS_TO_EXEMPT = [
'''good first issue''',
'''good second issue''',
'''good difficult issue''',
'''enhancement''',
'''new pipeline/model''',
'''new scheduler''',
'''wip''',
]
def main():
    g = Github(os.environ["GITHUB_TOKEN"])
    repo = g.get_repo("huggingface/diffusers")
    open_issues = repo.get_issues(state="open")

    for issue in open_issues:
        comments = sorted(issue.get_comments(), key=lambda i: i.created_at, reverse=True)
        last_comment = comments[0] if len(comments) > 0 else None
if (
last_comment is not None
and last_comment.user.login == "github-actions[bot]"
and (dt.utcnow() - issue.updated_at).days > 7
and (dt.utcnow() - issue.created_at).days >= 30
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
):
# Closes the issue after 7 days of inactivity since the Stalebot notification.
issue.edit(state='''closed''' )
elif (
"stale" in issue.get_labels()
and last_comment is not None
and last_comment.user.login != "github-actions[bot]"
):
# Opens the issue if someone other than Stalebot commented.
issue.edit(state='''open''' )
issue.remove_from_labels('''stale''' )
elif (
(dt.utcnow() - issue.updated_at).days > 23
and (dt.utcnow() - issue.created_at).days >= 30
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
):
# Post a Stalebot notification after 23 days of inactivity.
issue.create_comment(
'''This issue has been automatically marked as stale because it has not had '''
'''recent activity. If you think this still needs to be addressed '''
'''please comment on this thread.\n\nPlease note that issues that do not follow the '''
'''[contributing guidelines](https://github.com/huggingface/diffusers/blob/main/CONTRIBUTING.md) '''
'''are likely to be ignored.''' )
issue.add_to_labels('''stale''' )
if __name__ == "__main__":
main()
| 264 | 0 |
"""simple docstring"""
def generate_large_matrix() -> list[list[int]]:
    return [list(range(1000 - i, -1000 - i, -1)) for i in range(1000)]
grid = generate_large_matrix()
test_grids = (
[[4, 3, 2, -1], [3, 2, 1, -1], [1, 1, -1, -2], [-1, -1, -2, -3]],
[[3, 2], [1, 0]],
[[7, 7, 6]],
[[7, 7, 6], [-1, -2, -3]],
grid,
)
def validate_grid(grid: list[list[int]]) -> None:
    assert all(row == sorted(row, reverse=True) for row in grid)
    assert all(list(col) == sorted(col, reverse=True) for col in zip(*grid))
def find_negative_index(array: list[int]) -> int:
    left = 0
    right = len(array) - 1

    # Edge cases such as no values or all numbers are negative.
    if not array or array[0] < 0:
        return 0

    while right + 1 > left:
        mid = (left + right) // 2
        num = array[mid]

        # Num must be negative and the index must be greater than or equal to 0.
        if num < 0 and array[mid - 1] >= 0:
            return mid

        if num >= 0:
            left = mid + 1
        else:
            right = mid - 1
    # No negative numbers so return the last index of the array + 1 which is the length.
    return len(array)
def count_negatives_binary_search(grid: list[list[int]]) -> int:
    total = 0
    bound = len(grid[0])

    for i in range(len(grid)):
        bound = find_negative_index(grid[i][:bound])
        total += bound
    return (len(grid) * len(grid[0])) - total
def count_negatives_brute_force(grid: list[list[int]]) -> int:
    return len([number for row in grid for number in row if number < 0])
def count_negatives_brute_force_with_break(grid: list[list[int]]) -> int:
    total = 0
    for row in grid:
        for i, number in enumerate(row):
            if number < 0:
                total += len(row) - i
                break
    return total
def benchmark() -> None:
    from timeit import timeit

    print("Running benchmarks")
    setup = (
        "from __main__ import count_negatives_binary_search, "
        "count_negatives_brute_force, count_negatives_brute_force_with_break, grid"
    )
    for func in (
        "count_negatives_binary_search",  # took 0.7727 seconds
        "count_negatives_brute_force_with_break",  # took 4.6505 seconds
        "count_negatives_brute_force",  # took 12.8160 seconds
    ):
        time = timeit(f"{func}(grid=grid)", setup=setup, number=500)
        print(f"{func}() took {time:0.4f} seconds")
if __name__ == "__main__":
import doctest
doctest.testmod()
benchmark()
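    # Small worked example (added): the grid below is sorted non-increasingly in
    # both rows and columns and contains exactly 2 negative numbers, so all
    # three strategies must agree.
    example = [[4, 3, -1], [3, 1, -2]]
    assert (
        count_negatives_binary_search(example)
        == count_negatives_brute_force(example)
        == count_negatives_brute_force_with_break(example)
        == 2
    )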
| 60 |
"""simple docstring"""
import argparse
import json
import numpy
import torch
from transformers.models.xlm.tokenization_xlm import VOCAB_FILES_NAMES
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging
logging.set_verbosity_info()
def convert_xlm_checkpoint_to_pytorch(xlm_checkpoint_path, pytorch_dump_folder_path):
    # Load checkpoint
    chkpt = torch.load(xlm_checkpoint_path, map_location="cpu")

    state_dict = chkpt["model"]

    # We have the base model one level deeper than the original XLM repository
    two_levels_state_dict = {}
    for k, v in state_dict.items():
        if "pred_layer" in k:
            two_levels_state_dict[k] = v
        else:
            two_levels_state_dict["transformer." + k] = v

    config = chkpt["params"]
    config = {n: v for n, v in config.items() if not isinstance(v, (torch.FloatTensor, numpy.ndarray))}

    vocab = chkpt["dico_word2id"]
    vocab = {s + "</w>" if s.find("@@") == -1 and i > 13 else s.replace("@@", ""): i for s, i in vocab.items()}

    # Save pytorch-model
    pytorch_weights_dump_path = pytorch_dump_folder_path + "/" + WEIGHTS_NAME
    pytorch_config_dump_path = pytorch_dump_folder_path + "/" + CONFIG_NAME
    pytorch_vocab_dump_path = pytorch_dump_folder_path + "/" + VOCAB_FILES_NAMES["vocab_file"]

    print(f"Save PyTorch model to {pytorch_weights_dump_path}")
    torch.save(two_levels_state_dict, pytorch_weights_dump_path)

    print(f"Save configuration file to {pytorch_config_dump_path}")
    with open(pytorch_config_dump_path, "w", encoding="utf-8") as f:
        f.write(json.dumps(config, indent=2) + "\n")

    print(f"Save vocab file to {pytorch_vocab_dump_path}")
    with open(pytorch_vocab_dump_path, "w", encoding="utf-8") as f:
        f.write(json.dumps(vocab, indent=2) + "\n")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--xlm_checkpoint_path''', default=None, type=str, required=True, help='''Path the official PyTorch dump.'''
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
    args = parser.parse_args()
convert_xlm_checkpoint_to_pytorch(args.xlm_checkpoint_path, args.pytorch_dump_folder_path)
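    # Example invocation (added for illustration; the script and file names
    # below are hypothetical):
    #
    #     python convert_xlm_checkpoint.py \
    #         --xlm_checkpoint_path ./mlm_en_2048.pth \
    #         --pytorch_dump_folder_path ./xlm-pytorch
    #
    # This writes the weights, config.json and the vocab file into the output
    # folder using the WEIGHTS_NAME / CONFIG_NAME constants imported above.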
| 264 | 0 |
"""simple docstring"""
from typing import Dict, List, Optional
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "nielsr/canine-s": 2048,
}

# Unicode defines 1,114,112 total “codepoints”
UNICODE_VOCAB_SIZE = 1114112

# Below: Constants defining canonical codepoints for special, pseudo-characters.
# Copied from https://github.com/google-research/language/blob/master/language/canine/special_codepoints.py
PAD = 0

CLS = 0xE000
SEP = 0xE001
BOS = 0xE002
MASK = 0xE003
RESERVED = 0xE004

# Maps special codepoints to human-readable names.
SPECIAL_CODEPOINTS: Dict[int, str] = {
    # Special symbols are represented using codepoints values that are valid,
    # but designated as "Private Use", meaning that they will never be assigned
    # characters by the Unicode Consortium, and are thus safe for use here.
    #
    # NOTE: Do *NOT* add any sort of [UNK_CHAR] here. They are explicitly
    # excluded and should fail with a hard error.
    CLS: "[CLS]",
    SEP: "[SEP]",
    BOS: "[BOS]",
    MASK: "[MASK]",
    PAD: "[PAD]",
    RESERVED: "[RESERVED]",
}

# Maps special codepoint human-readable names to their codepoint values.
SPECIAL_CODEPOINTS_BY_NAME: Dict[str, int] = {name: codepoint for codepoint, name in SPECIAL_CODEPOINTS.items()}


class CanineTokenizer(PreTrainedTokenizer):
    """
    Construct a CANINE tokenizer (i.e. a character splitter). It turns text into a sequence of
    characters and converts each character into its Unicode codepoint.
    """

    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES

    def __init__(
        self,
        bos_token=chr(CLS),
        eos_token=chr(SEP),
        sep_token=chr(SEP),
        cls_token=chr(CLS),
        pad_token=chr(PAD),
        mask_token=chr(MASK),
        add_prefix_space=False,
        model_max_length=2048,
        **kwargs,
    ):
        bos_token = AddedToken(bos_token, lstrip=False, rstrip=False) if isinstance(bos_token, str) else bos_token
        eos_token = AddedToken(eos_token, lstrip=False, rstrip=False) if isinstance(eos_token, str) else eos_token
        sep_token = AddedToken(sep_token, lstrip=False, rstrip=False) if isinstance(sep_token, str) else sep_token
        cls_token = AddedToken(cls_token, lstrip=False, rstrip=False) if isinstance(cls_token, str) else cls_token
        pad_token = AddedToken(pad_token, lstrip=False, rstrip=False) if isinstance(pad_token, str) else pad_token

        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        super().__init__(
            bos_token=bos_token,
            eos_token=eos_token,
            sep_token=sep_token,
            cls_token=cls_token,
            pad_token=pad_token,
            mask_token=mask_token,
            add_prefix_space=add_prefix_space,
            model_max_length=model_max_length,
            **kwargs,
        )

        # Creates a mapping for looking up the IDs of special symbols.
        self._special_codepoints: Dict[str, int] = {}
        for codepoint, name in SPECIAL_CODEPOINTS.items():
            self._special_codepoints[name] = codepoint

        # Creates a mapping for looking up the string forms of special symbol IDs.
        self._special_codepoint_strings: Dict[int, str] = {
            codepoint: name for name, codepoint in self._special_codepoints.items()
        }

        self._unicode_vocab_size = UNICODE_VOCAB_SIZE
        self._num_special_tokens = len(self._special_codepoints)

    @property
    def vocab_size(self) -> int:
        return self._unicode_vocab_size

    def _tokenize(self, text: str) -> List[str]:
        """Tokenize a string (i.e. perform character splitting)."""
        return list(text)

    def _convert_token_to_id(self, token: str) -> int:
        """Converts a token (i.e. a character) to an id (i.e. its codepoint)."""
        try:
            return ord(token)
        except TypeError:
            raise ValueError(f"invalid token: '{token}'")

    def _convert_id_to_token(self, index: int) -> str:
        """
        Converts a Unicode codepoint (integer) to a token (str). Special codepoints are
        converted to their human-readable format.
        """
        try:
            if index in SPECIAL_CODEPOINTS:
                return SPECIAL_CODEPOINTS[index]
            return chr(index)
        except TypeError:
            raise ValueError(f"invalid id: {index}")

    def convert_tokens_to_string(self, tokens):
        return "".join(tokens)

    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        result = cls + token_ids_0 + sep
        if token_ids_1 is not None:
            result += token_ids_1 + sep
        return result

    def get_special_tokens_mask(
        self,
        token_ids_0: List[int],
        token_ids_1: Optional[List[int]] = None,
        already_has_special_tokens: bool = False,
    ) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )

        result = [1] + ([0] * len(token_ids_0)) + [1]
        if token_ids_1 is not None:
            result += ([0] * len(token_ids_1)) + [1]
        return result

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        result = len(cls + token_ids_0 + sep) * [0]
        if token_ids_1 is not None:
            result += len(token_ids_1 + sep) * [1]
        return result

    # CanineTokenizer has no vocab file to save.
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None):
        return ()
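# Behaviour sketch (added for illustration; run from an environment where
# `transformers` is installed, since this module uses relative imports):
#
#     from transformers import CanineTokenizer
#
#     tokenizer = CanineTokenizer()
#     tokenizer("hi")["input_ids"]
#     # Characters map directly to their Unicode codepoints, wrapped in the
#     # CLS/SEP pseudo-characters: [0xE000, ord("h"), ord("i"), 0xE001]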
| 61 |
"""simple docstring"""
from . import __version__
# Backward compatibility imports, to make sure all those objects can be found in file_utils
from .utils import (
CLOUDFRONT_DISTRIB_PREFIX,
CONFIG_NAME,
DISABLE_TELEMETRY,
DUMMY_INPUTS,
DUMMY_MASK,
ENV_VARS_TRUE_AND_AUTO_VALUES,
ENV_VARS_TRUE_VALUES,
FEATURE_EXTRACTOR_NAME,
FLAX_WEIGHTS_NAME,
HF_MODULES_CACHE,
HUGGINGFACE_CO_PREFIX,
HUGGINGFACE_CO_RESOLVE_ENDPOINT,
MODEL_CARD_NAME,
MULTIPLE_CHOICE_DUMMY_INPUTS,
PYTORCH_PRETRAINED_BERT_CACHE,
PYTORCH_TRANSFORMERS_CACHE,
S3_BUCKET_PREFIX,
SENTENCEPIECE_UNDERLINE,
SPIECE_UNDERLINE,
TF2_WEIGHTS_NAME,
TF_WEIGHTS_NAME,
TORCH_FX_REQUIRED_VERSION,
TRANSFORMERS_CACHE,
TRANSFORMERS_DYNAMIC_MODULE_NAME,
USE_JAX,
USE_TF,
USE_TORCH,
WEIGHTS_INDEX_NAME,
WEIGHTS_NAME,
ContextManagers,
DummyObject,
EntryNotFoundError,
ExplicitEnum,
ModelOutput,
PaddingStrategy,
PushToHubMixin,
RepositoryNotFoundError,
RevisionNotFoundError,
TensorType,
_LazyModule,
add_code_sample_docstrings,
add_end_docstrings,
add_start_docstrings,
add_start_docstrings_to_model_forward,
cached_property,
copy_func,
default_cache_path,
define_sagemaker_information,
get_cached_models,
get_file_from_repo,
get_full_repo_name,
get_torch_version,
has_file,
http_user_agent,
is_apex_available,
    is_bs4_available,
is_coloredlogs_available,
is_datasets_available,
    is_detectron2_available,
is_faiss_available,
is_flax_available,
is_ftfy_available,
is_in_notebook,
is_ipex_available,
is_librosa_available,
is_offline_mode,
is_onnx_available,
is_pandas_available,
is_phonemizer_available,
is_protobuf_available,
is_psutil_available,
    is_py3nvml_available,
is_pyctcdecode_available,
is_pytesseract_available,
is_pytorch_quantization_available,
is_rjieba_available,
is_sagemaker_dp_enabled,
is_sagemaker_mp_enabled,
is_scipy_available,
is_sentencepiece_available,
is_seqio_available,
is_sklearn_available,
is_soundfile_availble,
is_spacy_available,
is_speech_available,
is_tensor,
is_tensorflow_probability_available,
    is_tf2onnx_available,
is_tf_available,
is_timm_available,
is_tokenizers_available,
is_torch_available,
    is_torch_bf16_available,
is_torch_cuda_available,
is_torch_fx_available,
is_torch_fx_proxy,
is_torch_mps_available,
    is_torch_tf32_available,
is_torch_tpu_available,
is_torchaudio_available,
is_training_run_on_sagemaker,
is_vision_available,
replace_return_docstrings,
requires_backends,
to_numpy,
to_py_obj,
torch_only_method,
)
| 264 | 0 |
from queue import PriorityQueue
from typing import Any
import numpy as np
def pass_and_relaxation(
    graph: dict,
    v: str,
    visited_forward: set,
    visited_backward: set,
    cst_fwd: dict,
    cst_bwd: dict,
    queue: PriorityQueue,
    parent: dict,
    shortest_distance: float | int,
) -> float | int:
    for nxt, d in graph[v]:
        if nxt in visited_forward:
            continue
        old_cost_f = cst_fwd.get(nxt, np.inf)
        new_cost_f = cst_fwd[v] + d
        if new_cost_f < old_cost_f:
            queue.put((new_cost_f, nxt))
            cst_fwd[nxt] = new_cost_f
            parent[nxt] = v
        if nxt in visited_backward:
            if cst_fwd[v] + d + cst_bwd[nxt] < shortest_distance:
                shortest_distance = cst_fwd[v] + d + cst_bwd[nxt]
    return shortest_distance
def bidirectional_dij(source: str, destination: str, graph_forward: dict, graph_backward: dict) -> int:
    shortest_path_distance = -1

    visited_forward = set()
    visited_backward = set()
    cst_fwd = {source: 0}
    cst_bwd = {destination: 0}
    parent_forward = {source: None}
    parent_backward = {destination: None}
    queue_forward: PriorityQueue[Any] = PriorityQueue()
    queue_backward: PriorityQueue[Any] = PriorityQueue()

    shortest_distance = np.inf

    queue_forward.put((0, source))
    queue_backward.put((0, destination))

    if source == destination:
        return 0

    while not queue_forward.empty() and not queue_backward.empty():
        _, v_fwd = queue_forward.get()
        visited_forward.add(v_fwd)

        _, v_bwd = queue_backward.get()
        visited_backward.add(v_bwd)

        shortest_distance = pass_and_relaxation(
            graph_forward,
            v_fwd,
            visited_forward,
            visited_backward,
            cst_fwd,
            cst_bwd,
            queue_forward,
            parent_forward,
            shortest_distance,
        )

        shortest_distance = pass_and_relaxation(
            graph_backward,
            v_bwd,
            visited_backward,
            visited_forward,
            cst_bwd,
            cst_fwd,
            queue_backward,
            parent_backward,
            shortest_distance,
        )

        if cst_fwd[v_fwd] + cst_bwd[v_bwd] >= shortest_distance:
            break

    if shortest_distance != np.inf:
        shortest_path_distance = shortest_distance
    return shortest_path_distance
graph_fwd = {
'B': [['C', 1]],
'C': [['D', 1]],
'D': [['F', 1]],
'E': [['B', 1], ['G', 2]],
'F': [],
'G': [['F', 1]],
}
graph_bwd = {
'B': [['E', 1]],
'C': [['B', 1]],
'D': [['C', 1]],
'F': [['D', 1], ['G', 1]],
'E': [[None, np.inf]],
'G': [['E', 2]],
}
if __name__ == "__main__":
import doctest
doctest.testmod()
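    # Illustrative run (added): with the graphs above, the shortest E -> F path
    # is E -> G -> F with total weight 2 + 1 = 3.
    print(bidirectional_dij("E", "F", graph_fwd, graph_bwd))  # expected: 3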
| 62 |
"""simple docstring"""
import os
import tempfile
import unittest
import uuid
from pathlib import Path
from transformers.testing_utils import get_tests_dir, require_soundfile, require_torch, require_vision
from transformers.tools.agent_types import AgentAudio, AgentImage, AgentText
from transformers.utils import is_soundfile_availble, is_torch_available, is_vision_available
if is_torch_available():
import torch
if is_soundfile_availble():
import soundfile as sf
if is_vision_available():
from PIL import Image
def get_new_path(suffix="") -> str:
    directory = tempfile.mkdtemp()
    return os.path.join(directory, str(uuid.uuid4()) + suffix)
@require_soundfile
@require_torch
class AgentAudioTests(unittest.TestCase):
    def test_from_tensor(self):
        tensor = torch.rand(12, dtype=torch.float64) - 0.5
        agent_type = AgentAudio(tensor)
        path = str(agent_type.to_string())

        # Ensure that the tensor and the agent_type's tensor are the same
        self.assertTrue(torch.allclose(tensor, agent_type.to_raw(), atol=1e-4))

        del agent_type

        # Ensure the path remains even after the object deletion
        self.assertTrue(os.path.exists(path))

        # Ensure that the file contains the same value as the original tensor
        new_tensor, _ = sf.read(path)
        self.assertTrue(torch.allclose(tensor, torch.tensor(new_tensor), atol=1e-4))

    def test_from_string(self):
        tensor = torch.rand(12, dtype=torch.float64) - 0.5
        path = get_new_path(suffix=".wav")
        sf.write(path, tensor, 16000)

        agent_type = AgentAudio(path)

        self.assertTrue(torch.allclose(tensor, agent_type.to_raw(), atol=1e-4))
        self.assertEqual(agent_type.to_string(), path)
@require_vision
@require_torch
class AgentImageTests(unittest.TestCase):
    def test_from_tensor(self):
        tensor = torch.randint(0, 256, (64, 64, 3))
        agent_type = AgentImage(tensor)
        path = str(agent_type.to_string())

        # Ensure that the tensor and the agent_type's tensor are the same
        self.assertTrue(torch.allclose(tensor, agent_type._tensor, atol=1e-4))

        self.assertIsInstance(agent_type.to_raw(), Image.Image)

        # Ensure the path remains even after the object deletion
        del agent_type
        self.assertTrue(os.path.exists(path))

    def test_from_string(self):
        path = Path(get_tests_dir("fixtures/tests_samples/COCO")) / "000000039769.png"
        image = Image.open(path)
        agent_type = AgentImage(path)

        self.assertTrue(path.samefile(agent_type.to_string()))
        self.assertTrue(image == agent_type.to_raw())

        # Ensure the path remains even after the object deletion
        del agent_type
        self.assertTrue(os.path.exists(path))

    def test_from_image(self):
        path = Path(get_tests_dir("fixtures/tests_samples/COCO")) / "000000039769.png"
        image = Image.open(path)
        agent_type = AgentImage(image)

        self.assertFalse(path.samefile(agent_type.to_string()))
        self.assertTrue(image == agent_type.to_raw())

        # Ensure the path remains even after the object deletion
        del agent_type
        self.assertTrue(os.path.exists(path))
class AgentTextTests(unittest.TestCase):
    def test_from_string(self):
        string = "Hey!"
        agent_type = AgentText(string)

        self.assertEqual(string, agent_type.to_string())
        self.assertEqual(string, agent_type.to_raw())
        self.assertEqual(string, agent_type)
| 264 | 0 |
'''simple docstring'''
from typing import Optional, Union
import torch
from torch import nn
from ...configuration_utils import ConfigMixin, register_to_config
from ...models.modeling_utils import ModelMixin
class StableUnCLIPImageNormalizer(ModelMixin, ConfigMixin):
    """
    Holds the mean and standard deviation of the CLIP embedder used in stable unCLIP,
    used to normalize the image embeddings before noise is applied and to un-normalize
    the noised image embeddings afterwards.
    """

    @register_to_config
    def __init__(
        self,
        embedding_dim: int = 768,
    ):
        super().__init__()

        self.mean = nn.Parameter(torch.zeros(1, embedding_dim))
        self.std = nn.Parameter(torch.ones(1, embedding_dim))

    def to(
        self,
        torch_device: Optional[Union[str, torch.device]] = None,
        torch_dtype: Optional[torch.dtype] = None,
    ):
        self.mean = nn.Parameter(self.mean.to(torch_device).to(torch_dtype))
        self.std = nn.Parameter(self.std.to(torch_device).to(torch_dtype))
        return self

    def scale(self, embeds):
        embeds = (embeds - self.mean) * 1.0 / self.std
        return embeds

    def unscale(self, embeds):
        embeds = (embeds * self.std) + self.mean
        return embeds
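# Minimal usage sketch (added for illustration; assumes the default
# embedding_dim=768 and that diffusers is installed):
#
#     import torch
#     from diffusers.pipelines.stable_diffusion import StableUnCLIPImageNormalizer
#
#     normalizer = StableUnCLIPImageNormalizer()
#     embeds = torch.randn(2, 768)
#     roundtrip = normalizer.unscale(normalizer.scale(embeds))
#     assert torch.allclose(roundtrip, embeds, atol=1e-6)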
| 63 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
'''configuration_x_clip''': [
'''XCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''XCLIPConfig''',
'''XCLIPTextConfig''',
'''XCLIPVisionConfig''',
],
'''processing_x_clip''': ['''XCLIPProcessor'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_x_clip"] = [
'''XCLIP_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''XCLIPModel''',
'''XCLIPPreTrainedModel''',
'''XCLIPTextModel''',
'''XCLIPVisionModel''',
]
if TYPE_CHECKING:
from .configuration_x_clip import (
XCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
XCLIPConfig,
XCLIPTextConfig,
XCLIPVisionConfig,
)
from .processing_x_clip import XCLIPProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_x_clip import (
XCLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
XCLIPModel,
XCLIPPreTrainedModel,
XCLIPTextModel,
XCLIPVisionModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 264 | 0 |
"""simple docstring"""
import warnings
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class CLIPSegProcessor(ProcessorMixin):
    """
    Constructs a CLIPSeg processor which wraps a ViT image processor and a CLIP tokenizer
    into a single processor.
    """

    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "ViTImageProcessor"
    tokenizer_class = ("CLIPTokenizer", "CLIPTokenizerFast")

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")

        super().__init__(image_processor, tokenizer)

    def __call__(self, text=None, visual_prompt=None, images=None, return_tensors=None, **kwargs):
        if text is None and visual_prompt is None and images is None:
            raise ValueError("You have to specify either text, visual prompt or images.")

        if text is not None and visual_prompt is not None:
            raise ValueError("You have to specify exactly one type of prompt. Either text or visual prompt.")

        if text is not None:
            encoding = self.tokenizer(text, return_tensors=return_tensors, **kwargs)

        if visual_prompt is not None:
            prompt_features = self.image_processor(visual_prompt, return_tensors=return_tensors, **kwargs)

        if images is not None:
            image_features = self.image_processor(images, return_tensors=return_tensors, **kwargs)

        if visual_prompt is not None and images is not None:
            encoding = {
                "pixel_values": image_features.pixel_values,
                "conditional_pixel_values": prompt_features.pixel_values,
            }
            return encoding
        elif text is not None and images is not None:
            encoding["pixel_values"] = image_features.pixel_values
            return encoding
        elif text is not None:
            return encoding
        elif visual_prompt is not None:
            encoding = {
                "conditional_pixel_values": prompt_features.pixel_values,
            }
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features), tensor_type=return_tensors)

    def batch_decode(self, *args, **kwargs):
        """Forwards all arguments to the tokenizer's batch_decode method."""
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        """Forwards all arguments to the tokenizer's decode method."""
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def feature_extractor_class(self):
        warnings.warn(
            "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.",
            FutureWarning,
        )
        return self.image_processor_class

    @property
    def feature_extractor(self):
        warnings.warn(
            "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.",
            FutureWarning,
        )
        return self.image_processor
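# Usage sketch (added). "CIDAS/clipseg-rd64-refined" is the commonly used
# CLIPSeg checkpoint, but treat the name (and the image path) as examples:
#
#     from PIL import Image
#     from transformers import CLIPSegProcessor
#
#     processor = CLIPSegProcessor.from_pretrained("CIDAS/clipseg-rd64-refined")
#     image = Image.open("cat.png")  # hypothetical local file
#     inputs = processor(text=["a cat"], images=[image], return_tensors="pt")
#     # inputs now holds input_ids, attention_mask and pixel_values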
| 64 |
"""simple docstring"""
import pickle
import shutil
import tempfile
import unittest
from transformers import SPIECE_UNDERLINE, XLMRobertaTokenizer, XLMRobertaTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir('''fixtures/test_sentencepiece.model''')
@require_sentencepiece
@require_tokenizers
class XLMRobertaTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = XLMRobertaTokenizer
    rust_tokenizer_class = XLMRobertaTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True
    def setUp(self):
        super().setUp()

        # We have a SentencePiece fixture for testing
        tokenizer = XLMRobertaTokenizer(SAMPLE_VOCAB, keep_accents=True)
        tokenizer.save_pretrained(self.tmpdirname)

    def test_convert_token_and_id(self):
        """Test ``_convert_token_to_id`` and ``_convert_id_to_token``."""
        token = "<pad>"
        token_id = 1

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)

    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())

        self.assertEqual(vocab_keys[0], "<s>")
        self.assertEqual(vocab_keys[1], "<pad>")
        self.assertEqual(vocab_keys[-1], "<mask>")
        self.assertEqual(len(vocab_keys), 1002)

    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 1002)
    def test_full_tokenizer(self):
        tokenizer = XLMRobertaTokenizer(SAMPLE_VOCAB, keep_accents=True)

        tokens = tokenizer.tokenize("This is a test")
        self.assertListEqual(tokens, ["▁This", "▁is", "▁a", "▁t", "est"])

        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens),
            [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]],
        )

        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.")
        self.assertListEqual(
            tokens,
            [
SPIECE_UNDERLINE + '''I''',
SPIECE_UNDERLINE + '''was''',
SPIECE_UNDERLINE + '''b''',
'''or''',
'''n''',
SPIECE_UNDERLINE + '''in''',
SPIECE_UNDERLINE + '''''',
'''9''',
'''2''',
'''0''',
'''0''',
'''0''',
''',''',
SPIECE_UNDERLINE + '''and''',
SPIECE_UNDERLINE + '''this''',
SPIECE_UNDERLINE + '''is''',
SPIECE_UNDERLINE + '''f''',
'''al''',
'''s''',
'''é''',
'''.''',
] , )
        ids = tokenizer.convert_tokens_to_ids(tokens)
        self.assertListEqual(
            ids,
            [
value + tokenizer.fairseq_offset
for value in [8, 21, 84, 55, 24, 19, 7, 2, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 2, 4]
# ^ unk: 2 + 1 = 3 unk: 2 + 1 = 3 ^
] , )
        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(
            back_tokens,
            [
SPIECE_UNDERLINE + '''I''',
SPIECE_UNDERLINE + '''was''',
SPIECE_UNDERLINE + '''b''',
'''or''',
'''n''',
SPIECE_UNDERLINE + '''in''',
SPIECE_UNDERLINE + '''''',
'''<unk>''',
'''2''',
'''0''',
'''0''',
'''0''',
''',''',
SPIECE_UNDERLINE + '''and''',
SPIECE_UNDERLINE + '''this''',
SPIECE_UNDERLINE + '''is''',
SPIECE_UNDERLINE + '''f''',
'''al''',
'''s''',
'''<unk>''',
'''.''',
] , )
    def test_save_pretrained(self):
        if not self.test_slow_tokenizer:
            # as we don't have a slow version, we can't compare the outputs between slow and fast versions
            return

        self.tokenizers_list[0] = (self.rust_tokenizer_class, "hf-internal-testing/tiny-xlm-roberta", {})
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)

                tmpdirname2 = tempfile.mkdtemp()

                tokenizer_r_files = tokenizer_r.save_pretrained(tmpdirname2)
                tokenizer_p_files = tokenizer_p.save_pretrained(tmpdirname2)

                # Checks it save with the same files + the tokenizer.json file for the fast one
                self.assertTrue(any("tokenizer.json" in f for f in tokenizer_r_files))
                tokenizer_r_files = tuple(f for f in tokenizer_r_files if "tokenizer.json" not in f)
                self.assertSequenceEqual(tokenizer_r_files, tokenizer_p_files)

                # Checks everything loads correctly in the same way
                tokenizer_rp = tokenizer_r.from_pretrained(tmpdirname2)
                tokenizer_pp = tokenizer_p.from_pretrained(tmpdirname2)

                # Check special tokens are set accordingly on Rust and Python
                for key in tokenizer_pp.special_tokens_map:
                    self.assertTrue(hasattr(tokenizer_rp, key))
                    # self.assertEqual(getattr(tokenizer_rp, key), getattr(tokenizer_pp, key))
                    # self.assertEqual(getattr(tokenizer_rp, key + "_id"), getattr(tokenizer_pp, key + "_id"))

                shutil.rmtree(tmpdirname2)

                # Save tokenizer rust, legacy_format=True
                tmpdirname2 = tempfile.mkdtemp()

                tokenizer_r_files = tokenizer_r.save_pretrained(tmpdirname2, legacy_format=True)
                tokenizer_p_files = tokenizer_p.save_pretrained(tmpdirname2)

                # Checks it save with the same files
                self.assertSequenceEqual(tokenizer_r_files, tokenizer_p_files)

                # Checks everything loads correctly in the same way
                tokenizer_rp = tokenizer_r.from_pretrained(tmpdirname2)
                tokenizer_pp = tokenizer_p.from_pretrained(tmpdirname2)

                # Check special tokens are set accordingly on Rust and Python
                for key in tokenizer_pp.special_tokens_map:
                    self.assertTrue(hasattr(tokenizer_rp, key))

                shutil.rmtree(tmpdirname2)

                # Save tokenizer rust, legacy_format=False
                tmpdirname2 = tempfile.mkdtemp()

                tokenizer_r_files = tokenizer_r.save_pretrained(tmpdirname2, legacy_format=False)
                tokenizer_p_files = tokenizer_p.save_pretrained(tmpdirname2)

                # Checks it saved the tokenizer.json file
                self.assertTrue(any("tokenizer.json" in f for f in tokenizer_r_files))

                # Checks everything loads correctly in the same way
                tokenizer_rp = tokenizer_r.from_pretrained(tmpdirname2)
                tokenizer_pp = tokenizer_p.from_pretrained(tmpdirname2)

                # Check special tokens are set accordingly on Rust and Python
                for key in tokenizer_pp.special_tokens_map:
                    self.assertTrue(hasattr(tokenizer_rp, key))

                shutil.rmtree(tmpdirname2)
    @cached_property
    def big_tokenizer(self):
        return XLMRobertaTokenizer.from_pretrained("xlm-roberta-base")

    def test_picklable_without_disk(self):
        with tempfile.NamedTemporaryFile() as f:
            shutil.copyfile(SAMPLE_VOCAB, f.name)
            tokenizer = XLMRobertaTokenizer(f.name, keep_accents=True)
            pickled_tokenizer = pickle.dumps(tokenizer)
        pickle.loads(pickled_tokenizer)
    def test_rust_and_python_full_tokenizers(self):
        if not self.test_rust_tokenizer:
            return

        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()

        sequence = "I was born in 92000, and this is falsé."

        tokens = tokenizer.tokenize(sequence)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)

        ids = tokenizer.encode(sequence, add_special_tokens=False)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)

        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)
    @slow
    def test_tokenization_base_easy_symbols(self):
        symbols = "Hello World!"
        original_tokenizer_encodings = [0, 35378, 6661, 38, 2]
        # xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.base')  # xlmr.large has same tokenizer
        # xlmr.eval()
        # xlmr.encode(symbols)

        self.assertListEqual(original_tokenizer_encodings, self.big_tokenizer.encode(symbols))
    @slow
    def test_tokenization_base_hard_symbols(self):
        symbols = (
            'This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) " [ ] ! : - . Also we will'
            " add words that should not exsist and be tokenized to <unk>, such as saoneuhaoesuth"
        )
        original_tokenizer_encodings = [
0,
3293,
83,
10,
4552,
4989,
7986,
678,
10,
5915,
111,
179459,
124850,
4,
6044,
237,
12,
6,
5,
6,
4,
6780,
705,
15,
1388,
44,
378,
10114,
711,
152,
20,
6,
5,
22376,
642,
1221,
15190,
34153,
450,
5608,
959,
1119,
57702,
136,
186,
47,
1098,
29367,
47,
# 4426, # What fairseq tokenizes from "<unk>": "_<"
# 3678, # What fairseq tokenizes from "<unk>": "unk"
# 2740, # What fairseq tokenizes from "<unk>": ">"
3, # What we tokenize from "<unk>": "<unk>"
6, # Residue from the tokenization: an extra sentencepiece underline
4,
6044,
237,
6284,
50901,
528,
31,
90,
34,
927,
2,
]
        # xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.base')  # xlmr.large has same tokenizer
        # xlmr.eval()
        # xlmr.encode(symbols)

        self.assertListEqual(original_tokenizer_encodings, self.big_tokenizer.encode(symbols))
    @slow
    def test_tokenizer_integration(self):
        # fmt: off
snake_case_ : int = {'''input_ids''': [[0, 11062, 82772, 7, 15, 82772, 538, 51529, 237, 17198, 1290, 206, 9, 215175, 1314, 136, 17198, 1290, 206, 9, 56359, 42, 122009, 9, 16466, 16, 87344, 4537, 9, 4717, 78381, 6, 159958, 7, 15, 24480, 618, 4, 527, 22693, 5428, 4, 2777, 24480, 9874, 4, 43523, 594, 4, 803, 18392, 33189, 18, 4, 43523, 24447, 12399, 100, 24955, 83658, 9626, 144057, 15, 839, 22335, 16, 136, 24955, 83658, 83479, 15, 39102, 724, 16, 678, 645, 2789, 1328, 4589, 42, 122009, 115774, 23, 805, 1328, 46876, 7, 136, 53894, 1940, 42227, 41159, 17721, 823, 425, 4, 27512, 98722, 206, 136, 5531, 4970, 919, 17336, 5, 2], [0, 20080, 618, 83, 82775, 47, 479, 9, 1517, 73, 53894, 333, 80581, 110117, 18811, 5256, 1295, 51, 152526, 297, 7986, 390, 124416, 538, 35431, 214, 98, 15044, 25737, 136, 7108, 43701, 23, 756, 135355, 7, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 581, 63773, 119455, 6, 147797, 88203, 7, 645, 70, 21, 3285, 10269, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]], '''attention_mask''': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
        # fmt: on

        self.tokenizer_integration_test_util(
            expected_encoding=snake_case_,  # the large literal above keeps its placeholder name
            model_name="xlm-roberta-base",
            revision="d9d8a8ea5eb94b1c6654ae9249df7793cd2933d3",
        )
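# Note (added): the `fairseq_offset` used in test_full_tokenizer is 1 because
# the fairseq vocabulary puts "<s>", "<pad>", "</s>", "<unk>" first, shifting
# every sentencepiece id by one. A quick sanity check (needs network access):
#
#     tokenizer = XLMRobertaTokenizer.from_pretrained("xlm-roberta-base")
#     assert tokenizer.convert_tokens_to_ids("<s>") == 0
#     assert tokenizer.convert_tokens_to_ids("<pad>") == 1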
| 264 | 0 |
import os
from typing import Optional
import fsspec
from fsspec.archive import AbstractArchiveFileSystem
import os
from typing import Optional

import fsspec
from fsspec.utils import DEFAULT_BLOCK_SIZE


class BaseCompressedFileFileSystem(fsspec.AbstractFileSystem):
    """Read the contents of a compressed file as a filesystem with a single file inside."""

    root_marker = ""
    protocol: str = (
        None  # protocol passed in prefix to the url. ex: "gzip", for gzip://file.txt::http://foo.bar/file.txt.gz
    )
    compression: str = None  # compression type in fsspec. ex: "gzip"
    extension: str = None  # extension of the filename to strip. ex: ".gz" to get file.txt from file.txt.gz

    def __init__(
        self, fo: str = "", target_protocol: Optional[str] = None, target_options: Optional[dict] = None, **kwargs
    ):
        super().__init__(self, **kwargs)
        # always open as "rb" since fsspec can then use the TextIOWrapper to make it work for "r" mode
        self.file = fsspec.open(
            fo,
            mode="rb",
            protocol=target_protocol,
            compression=self.compression,
            client_kwargs={
                "requote_redirect_url": False,  # see https://github.com/huggingface/datasets/pull/5459
                "trust_env": True,  # Enable reading proxy env variables.
                **(target_options or {}).pop("client_kwargs", {}),  # To avoid issues if it was already passed.
            },
            **(target_options or {}),
        )
        self.compressed_name = os.path.basename(self.file.path.split("::")[0])
        self.uncompressed_name = (
            self.compressed_name[: self.compressed_name.rindex(".")]
            if "." in self.compressed_name
            else self.compressed_name
        )
        self.dir_cache = None

    @classmethod
    def _strip_protocol(cls, path):
        # compressed file paths are always relative to the archive root
        return super()._strip_protocol(path).lstrip("/")

    def _get_dirs(self):
        if self.dir_cache is None:
            f = {**self.file.fs.info(self.file.path), "name": self.uncompressed_name}
            self.dir_cache = {f["name"]: f}

    def cat(self, path: str):
        return self.file.open().read()

    def _open(
        self,
        path: str,
        mode: str = "rb",
        block_size=None,
        autocommit=True,
        cache_options=None,
        **kwargs,
    ):
        path = self._strip_protocol(path)
        if mode != "rb":
            raise ValueError(f"Tried to read with mode {mode} on file {self.file.path} opened with mode 'rb'")
        return self.file.open()


class Bz2FileSystem(BaseCompressedFileFileSystem):
    protocol = "bz2"
    compression = "bz2"
    extension = ".bz2"


class GzipFileSystem(BaseCompressedFileFileSystem):
    protocol = "gzip"
    compression = "gzip"
    extension = ".gz"


class Lz4FileSystem(BaseCompressedFileFileSystem):
    protocol = "lz4"
    compression = "lz4"
    extension = ".lz4"


class XzFileSystem(BaseCompressedFileFileSystem):
    protocol = "xz"
    compression = "xz"
    extension = ".xz"


class ZstdFileSystem(BaseCompressedFileFileSystem):
    protocol = "zstd"
    compression = "zstd"
    extension = ".zst"

    def __init__(
        self,
        fo: str,
        mode: str = "rb",
        target_protocol: Optional[str] = None,
        target_options: Optional[dict] = None,
        block_size: int = DEFAULT_BLOCK_SIZE,
        **kwargs,
    ):
        super().__init__(
            fo=fo,
            mode=mode,
            target_protocol=target_protocol,
            target_options=target_options,
            block_size=block_size,
            **kwargs,
        )
        # We need to wrap the zstd decompressor to avoid this error in fsspec==2021.7.0 and zstandard==0.15.2:
        #
        # File "/Users/user/.virtualenvs/hf-datasets/lib/python3.7/site-packages/fsspec/core.py", line 145, in open
        #     out.close = close
        # AttributeError: 'zstd.ZstdDecompressionReader' object attribute 'close' is read-only
        #
        # see https://github.com/intake/filesystem_spec/issues/725
        _enter = self.file.__enter__

        class WrappedFile:
            def __init__(self, file_):
                self._file = file_

            def __enter__(self):
                self._file.__enter__()
                return self

            def __exit__(self, *args, **kwargs):
                self._file.__exit__(*args, **kwargs)

            def __iter__(self):
                return iter(self._file)

            def __next__(self):
                return next(self._file)

            def __getattr__(self, attr):
                return getattr(self._file, attr)

        def fixed_enter(*args, **kwargs):
            return WrappedFile(_enter(*args, **kwargs))

        self.file.__enter__ = fixed_enter
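
A minimal usage sketch for the compression filesystems above — not part of the original module. It assumes the class is registered with fsspec under its protocol name; the chained URL and the local `file.txt.gz` path are placeholders following standard fsspec conventions.

import fsspec

fsspec.register_implementation("gzip", GzipFileSystem, clobber=True)

# Read the single inner file out of a local gzip archive via fsspec's
# chained-protocol syntax ("gzip://<inner name>::<outer url>").
with fsspec.open("gzip://file.txt::file://file.txt.gz", "rt") as f:
    print(f.read())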
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

GPT_NEOX_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "EleutherAI/gpt-neox-20b": "https://huggingface.co/EleutherAI/gpt-neox-20b/resolve/main/config.json",
    # See all GPTNeoX models at https://huggingface.co/models?filter=gpt_neox
}
class GPTNeoXConfig(PretrainedConfig):
    model_type = "gpt_neox"

    def __init__(
        self,
        vocab_size=50432,
        hidden_size=6144,
        num_hidden_layers=44,
        num_attention_heads=64,
        intermediate_size=24576,
        hidden_act="gelu",
        rotary_pct=0.25,
        rotary_emb_base=10000,
        attention_dropout=0.0,
        hidden_dropout=0.0,
        classifier_dropout=0.1,
        max_position_embeddings=2048,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        use_cache=True,
        bos_token_id=0,
        eos_token_id=2,
        tie_word_embeddings=False,
        use_parallel_residual=True,
        rope_scaling=None,
        **kwargs,
    ):
        super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.rotary_pct = rotary_pct
        self.rotary_emb_base = rotary_emb_base
        self.attention_dropout = attention_dropout
        self.hidden_dropout = hidden_dropout
        self.classifier_dropout = classifier_dropout
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.use_cache = use_cache
        self.tie_word_embeddings = tie_word_embeddings
        self.use_parallel_residual = use_parallel_residual
        self.rope_scaling = rope_scaling
        self._rope_scaling_validation()

        if self.hidden_size % self.num_attention_heads != 0:
            raise ValueError(
                "The hidden size is not divisible by the number of attention heads! Make sure to update them!"
            )

    def _rope_scaling_validation(self):
        if self.rope_scaling is None:
            return

        if not isinstance(self.rope_scaling, dict) or len(self.rope_scaling) != 2:
            raise ValueError(
                "`rope_scaling` must be a dictionary with two fields, `type` and `factor`, "
                f"got {self.rope_scaling}"
            )
        rope_scaling_type = self.rope_scaling.get("type", None)
        rope_scaling_factor = self.rope_scaling.get("factor", None)
        if rope_scaling_type is None or rope_scaling_type not in ["linear", "dynamic"]:
            raise ValueError(
                f"`rope_scaling`'s type field must be one of ['linear', 'dynamic'], got {rope_scaling_type}"
            )
        if rope_scaling_factor is None or not isinstance(rope_scaling_factor, float) or rope_scaling_factor <= 1.0:
            raise ValueError(f"`rope_scaling`'s factor field must be a float > 1, got {rope_scaling_factor}")
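
A short illustrative sketch (not in the original file) of how the `_rope_scaling_validation` hook above behaves; the values are arbitrary.

if __name__ == "__main__":
    # A valid RoPE-scaling config: `type` in {"linear", "dynamic"} and `factor` a float > 1.
    config = GPTNeoXConfig(rope_scaling={"type": "linear", "factor": 2.0})
    print(config.rope_scaling)

    # A factor <= 1.0 fails validation with a ValueError.
    try:
        GPTNeoXConfig(rope_scaling={"type": "linear", "factor": 0.5})
    except ValueError as err:
        print(err)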
"""simple docstring"""
import argparse
from diffusers.pipelines.stable_diffusion.convert_from_ckpt import download_controlnet_from_original_ckpt
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
"--checkpoint_path", default=None, type=str, required=True, help="Path to the checkpoint to convert."
)
parser.add_argument(
"--original_config_file",
type=str,
required=True,
help="The YAML config file corresponding to the original architecture.",
)
parser.add_argument(
"--num_in_channels",
default=None,
type=int,
help="The number of input channels. If `None` number of input channels will be automatically inferred.",
)
parser.add_argument(
"--image_size",
default=5_12,
type=int,
help=(
"The image size that the model was trained on. Use 512 for Stable Diffusion v1.X and Stable Siffusion v2"
" Base. Use 768 for Stable Diffusion v2."
),
)
parser.add_argument(
"--extract_ema",
action="store_true",
help=(
"Only relevant for checkpoints that have both EMA and non-EMA weights. Whether to extract the EMA weights"
" or not. Defaults to `False`. Add `--extract_ema` to extract the EMA weights. EMA weights usually yield"
" higher quality images for inference. Non-EMA weights are usually better to continue fine-tuning."
),
)
parser.add_argument(
"--upcast_attention",
action="store_true",
help=(
"Whether the attention computation should always be upcasted. This is necessary when running stable"
" diffusion 2.1."
),
)
parser.add_argument(
"--from_safetensors",
action="store_true",
help="If `--checkpoint_path` is in `safetensors` format, load checkpoint with safetensors instead of PyTorch.",
)
parser.add_argument(
"--to_safetensors",
action="store_true",
help="Whether to store pipeline in safetensors format or not.",
)
parser.add_argument("--dump_path", default=None, type=str, required=True, help="Path to the output model.")
parser.add_argument("--device", type=str, help="Device to use (e.g. cpu, cuda:0, cuda:1, etc.)")
    def parse_bool(string):
        if string == "True":
            return True
        elif string == "False":
            return False
        else:
            raise ValueError(f"could not parse string as bool {string}")
parser.add_argument(
"--use_linear_projection", help="Override for use linear projection", required=False, type=parse_bool
)
parser.add_argument("--cross_attention_dim", help="Override for cross attention_dim", required=False, type=int)
    args = parser.parse_args()

    controlnet = download_controlnet_from_original_ckpt(
checkpoint_path=args.checkpoint_path,
original_config_file=args.original_config_file,
image_size=args.image_size,
extract_ema=args.extract_ema,
num_in_channels=args.num_in_channels,
upcast_attention=args.upcast_attention,
from_safetensors=args.from_safetensors,
device=args.device,
use_linear_projection=args.use_linear_projection,
cross_attention_dim=args.cross_attention_dim,
)
controlnet.save_pretrained(args.dump_path, safe_serialization=args.to_safetensors)
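
The same conversion can also be driven from Python rather than the CLI — a hypothetical sketch with placeholder paths, assuming the remaining keyword arguments of `download_controlnet_from_original_ckpt` keep their defaults.

# Sketch only: the checkpoint and YAML paths below are placeholders.
controlnet = download_controlnet_from_original_ckpt(
    checkpoint_path="control_sd15_canny.pth",
    original_config_file="cldm_v15.yaml",
    image_size=512,
    extract_ema=False,
    from_safetensors=False,
    device="cpu",
)
controlnet.save_pretrained("./controlnet-canny", safe_serialization=True)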
"""simple docstring"""
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_pegasus import PegasusTokenizer
else:
    PegasusTokenizer = None
logger = logging.get_logger(__name__)

SPIECE_UNDERLINE = "▁"

VOCAB_FILES_NAMES = {"vocab_file": "spiece.model", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {"google/pegasus-xsum": "https://huggingface.co/google/pegasus-xsum/resolve/main/spiece.model"},
    "tokenizer_file": {
        "google/pegasus-xsum": "https://huggingface.co/google/pegasus-xsum/resolve/main/tokenizer.json"
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "google/pegasus-xsum": 512,
}


class PegasusTokenizerFast(PreTrainedTokenizerFast):
    r"""Construct a "fast" PEGASUS tokenizer (backed by HuggingFace's *tokenizers* library)."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = PegasusTokenizer
    model_input_names = ["input_ids", "attention_mask"]
    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        pad_token="<pad>",
        eos_token="</s>",
        unk_token="<unk>",
        mask_token="<mask_2>",
        mask_token_sent="<mask_1>",
        additional_special_tokens=None,
        offset=103,
        **kwargs,
    ):
        self.offset = offset

        if additional_special_tokens is not None:
            if not isinstance(additional_special_tokens, list):
                raise TypeError(
                    f"additional_special_tokens should be of type {type(list)}, but is"
                    f" {type(additional_special_tokens)}"
                )

            additional_special_tokens_extended = (
                ([mask_token_sent] + additional_special_tokens)
                if mask_token_sent not in additional_special_tokens and mask_token_sent is not None
                else additional_special_tokens
            )
            # fill additional tokens with ..., <unk_token_102> in case not all additional tokens are already taken
            additional_special_tokens_extended += [
                f"<unk_{i}>" for i in range(len(additional_special_tokens_extended), self.offset - 1)
            ]

            if len(set(additional_special_tokens_extended)) != len(additional_special_tokens_extended):
                raise ValueError(
                    "Please make sure that the provided additional_special_tokens do not contain an incorrectly"
                    f" shifted list of <unk_x> tokens. Found {additional_special_tokens_extended}."
                )
            additional_special_tokens = additional_special_tokens_extended
        else:
            additional_special_tokens = [mask_token_sent] if mask_token_sent is not None else []
            additional_special_tokens += [f"<unk_{i}>" for i in range(2, self.offset)]

        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            pad_token=pad_token,
            eos_token=eos_token,
            unk_token=unk_token,
            mask_token=mask_token,
            mask_token_sent=mask_token_sent,
            offset=offset,
            additional_special_tokens=additional_special_tokens,
            **kwargs,
        )
        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True
    def _special_token_mask(self, seq):
        all_special_ids = set(self.all_special_ids)  # call it once instead of inside list comp
        all_special_ids.remove(self.unk_token_id)  # <unk> is only sometimes special

        if all_special_ids != set(range(len(self.additional_special_tokens) + 3)):
            raise ValueError(
                "There should be 3 special tokens: mask_token, pad_token, and eos_token +"
                f" {len(self.additional_special_tokens)} additional_special_tokens, but got {all_special_ids}"
            )

        return [1 if x in all_special_ids else 0 for x in seq]

    def get_special_tokens_mask(
        self, token_ids_0: List, token_ids_1: Optional[List] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        if already_has_special_tokens:
            return self._special_token_mask(token_ids_0)
        elif token_ids_1 is None:
            return self._special_token_mask(token_ids_0) + [1]
        else:
            return self._special_token_mask(token_ids_0 + token_ids_1) + [1]

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None) -> List[int]:
        if token_ids_1 is None:
            return token_ids_0 + [self.eos_token_id]
        # We don't expect to process pairs, but leave the pair logic for API consistency
        return token_ids_0 + token_ids_1 + [self.eos_token_id]
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                "Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
                "tokenizer."
            )
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)

        return (out_vocab_file,)
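
A brief usage sketch (not in the original file), assuming the "google/pegasus-xsum" checkpoint is reachable; it shows the single-sequence special-token layout produced by `build_inputs_with_special_tokens`.

from transformers import PegasusTokenizerFast

tokenizer = PegasusTokenizerFast.from_pretrained("google/pegasus-xsum")
ids = tokenizer("PEGASUS was pre-trained with gap sentence generation.").input_ids
print(ids[-1] == tokenizer.eos_token_id)  # True: a single </s> is appended, no BOS token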
'''simple docstring'''
import argparse
import json
import os
import re
import shutil
import torch
from transformers import BioGptConfig, BioGptForCausalLM
from transformers.models.biogpt.tokenization_biogpt import VOCAB_FILES_NAMES
from transformers.tokenization_utils_base import TOKENIZER_CONFIG_FILE
from transformers.utils import WEIGHTS_NAME, logging
logging.set_verbosity_warning()
json_indent = 2


class Dictionary:
    """A mapping from symbols to consecutive integers (from fairseq)."""

    def __init__(
        self,
        *,  # begin keyword-only arguments
        bos="<s>",
        pad="<pad>",
        eos="</s>",
        unk="<unk>",
        extra_special_symbols=None,
    ):
        self.bos_word, self.unk_word, self.pad_word, self.eos_word = bos, unk, pad, eos
        self.symbols = []
        self.count = []
        self.indices = {}
        self.bos_index = self.add_symbol(bos)
        self.pad_index = self.add_symbol(pad)
        self.eos_index = self.add_symbol(eos)
        self.unk_index = self.add_symbol(unk)
        if extra_special_symbols:
            for s in extra_special_symbols:
                self.add_symbol(s)
        self.nspecial = len(self.symbols)
    def __eq__(self, other):
        return self.indices == other.indices

    def __getitem__(self, idx):
        if idx < len(self.symbols):
            return self.symbols[idx]
        return self.unk_word

    def __len__(self):
        """Returns the number of symbols in the dictionary"""
        return len(self.symbols)

    def __contains__(self, sym):
        return sym in self.indices
    @classmethod
    def load(cls, f):
        """Loads the dictionary from a text file."""
        d = cls()
        d.add_from_file(f)
        return d
    def add_symbol(self, word, n=1, overwrite=False):
        """Adds a word to the dictionary"""
        if word in self.indices and not overwrite:
            idx = self.indices[word]
            self.count[idx] = self.count[idx] + n
            return idx
        else:
            idx = len(self.symbols)
            self.indices[word] = idx
            self.symbols.append(word)
            self.count.append(n)
            return idx

    def _load_meta(self, lines):
        return 0
    def add_from_file(self, f):
        """Loads a pre-existing dictionary from a text file and adds its symbols to this instance."""
        if isinstance(f, str):
            try:
                with open(f, "r", encoding="utf-8") as fd:
                    self.add_from_file(fd)
            except FileNotFoundError as fnfe:
                raise fnfe
            except UnicodeError:
                raise Exception("Incorrect encoding detected in {}, please rebuild the dataset".format(f))
            return

        lines = f.readlines()
        indices_start_line = self._load_meta(lines)

        for line in lines[indices_start_line:]:
            try:
                line, field = line.rstrip().rsplit(" ", 1)
                if field == "#fairseq:overwrite":
                    overwrite = True
                    line, field = line.rsplit(" ", 1)
                else:
                    overwrite = False
                count = int(field)
                word = line
                if word in self and not overwrite:
                    raise RuntimeError(
                        "Duplicate word found when loading Dictionary: '{}'. "
                        "Duplicate words can overwrite earlier ones by adding the "
                        "#fairseq:overwrite flag at the end of the corresponding row "
                        "in the dictionary file. If using the Camembert model, please "
                        "download an updated copy of the model file.".format(word)
                    )
                self.add_symbol(word, n=count, overwrite=overwrite)
            except ValueError:
                raise ValueError("Incorrect dictionary format, expected '<token> <cnt> [flags]'")
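
# Illustrative sketch (not in the original script): the four special symbols
# are registered by __init__, so a freshly added word lands at index 4 and is
# reachable through both `symbols` and `indices`.
if __name__ == "__main__":
    _d = Dictionary()
    _idx = _d.add_symbol("hello", n=3)
    assert _idx == 4 and _d[_idx] == "hello" and "hello" in _d and len(_d) == 5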
def rewrite_dict_keys(d):
    # (1) remove word breaking symbol, (2) add word ending symbol where the word is not broken up,
    # e.g.: d = {'le@@': 5, 'tt@@': 6, 'er': 7} => {'le': 5, 'tt': 6, 'er</w>': 7}
    da = dict((re.sub(r"@@$", "", k), v) if k.endswith("@@") else (re.sub(r"$", "</w>", k), v) for k, v in d.items())
    keep_keys = "<s> <pad> </s> <unk>".split()
    # restore the special tokens
    for k in keep_keys:
        del da[f"{k}</w>"]
        da[k] = d[k]  # restore
    return da
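
# Worked example (illustrative, not in the original script): fairseq BPE
# continuation markers "@@" are stripped and word-final tokens get "</w>",
# while the four special tokens are restored unchanged.
if __name__ == "__main__":
    _demo = {"<s>": 0, "<pad>": 1, "</s>": 2, "<unk>": 3, "le@@": 5, "tt@@": 6, "er": 7}
    print(rewrite_dict_keys(_demo))
    # -> {'le': 5, 'tt': 6, 'er</w>': 7, '<s>': 0, '<pad>': 1, '</s>': 2, '<unk>': 3}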
def convert_biogpt_checkpoint_to_pytorch(biogpt_checkpoint_path, pytorch_dump_folder_path):
    # prep
    if not os.path.exists(biogpt_checkpoint_path):
        raise ValueError(f"path {biogpt_checkpoint_path} does not exist!")
    os.makedirs(pytorch_dump_folder_path, exist_ok=True)
    print(f"Writing results to {pytorch_dump_folder_path}")

    # handle various types of models
    checkpoint_file = os.path.join(biogpt_checkpoint_path, "checkpoint.pt")
    if not os.path.isfile(checkpoint_file):
        raise ValueError(f"path to the file {checkpoint_file} does not exist!")
    chkpt = torch.load(checkpoint_file, map_location="cpu")

    args = chkpt["cfg"]["model"]

    # dicts
    dict_file = os.path.join(biogpt_checkpoint_path, "dict.txt")
    if not os.path.isfile(dict_file):
        raise ValueError(f"path to the file {dict_file} does not exist!")
    src_dict = Dictionary.load(dict_file)
    src_vocab = rewrite_dict_keys(src_dict.indices)
    src_vocab_size = len(src_vocab)
    src_vocab_file = os.path.join(pytorch_dump_folder_path, VOCAB_FILES_NAMES["vocab_file"])
    print(f"Generating {src_vocab_file} of {src_vocab_size} records")
    with open(src_vocab_file, "w", encoding="utf-8") as f:
        f.write(json.dumps(src_vocab, ensure_ascii=False, indent=json_indent))

    # merges_file (bpecodes)
    bpecodes_file = os.path.join(biogpt_checkpoint_path, "bpecodes")
    if not os.path.isfile(bpecodes_file):
        raise ValueError(f"path to the file {bpecodes_file} does not exist!")

    merges_file = os.path.join(pytorch_dump_folder_path, VOCAB_FILES_NAMES["merges_file"])
    shutil.copyfile(bpecodes_file, merges_file)

    # model config
    biogpt_model_config_file = os.path.join(pytorch_dump_folder_path, "config.json")

    model_conf = {
        "activation_dropout": args["activation_dropout"],
        "architectures": ["BioGptForCausalLM"],
        "attention_probs_dropout_prob": args["attention_dropout"],
        "bos_token_id": 0,
        "eos_token_id": 2,
        "hidden_act": args["activation_fn"],
        "hidden_dropout_prob": args["dropout"],
        "hidden_size": args["decoder_embed_dim"],
        "initializer_range": 0.02,
        "intermediate_size": args["decoder_ffn_embed_dim"],
        "layer_norm_eps": 1e-12,
        "layerdrop": args["decoder_layerdrop"],
        "max_position_embeddings": args["max_target_positions"],
        "model_type": "biogpt",
        "num_attention_heads": args["decoder_attention_heads"],
        "num_hidden_layers": args["decoder_layers"],
        "pad_token_id": 1,
        "scale_embedding": not args["no_scale_embedding"],
        "tie_word_embeddings": args["share_decoder_input_output_embed"],
        "vocab_size": src_vocab_size,
    }

    # good hparam defaults to start with
    print(f"Generating {biogpt_model_config_file}")
    with open(biogpt_model_config_file, "w", encoding="utf-8") as f:
        f.write(json.dumps(model_conf, ensure_ascii=False, indent=json_indent))

    # tokenizer config
    biogpt_tokenizer_config_file = os.path.join(pytorch_dump_folder_path, TOKENIZER_CONFIG_FILE)

    tokenizer_conf = {
        "bos_token": "<s>",
        "eos_token": "</s>",
        "model_max_length": 1024,
        "pad_token": "<pad>",
        "special_tokens_map_file": None,
        "tokenizer_class": "BioGptTokenizer",
        "unk_token": "<unk>",
    }

    print(f"Generating {biogpt_tokenizer_config_file}")
    with open(biogpt_tokenizer_config_file, "w", encoding="utf-8") as f:
        f.write(json.dumps(tokenizer_conf, ensure_ascii=False, indent=json_indent))

    # model
    model_state_dict = chkpt["model"]

    # remove unneeded keys
    ignore_keys = [
        "decoder.version",
    ]
    for k in ignore_keys:
        model_state_dict.pop(k, None)

    layer_names = list(model_state_dict.keys())
    for layer_name in layer_names:
        if layer_name.endswith("output_projection.weight"):
            model_state_dict[layer_name.replace("decoder.", "")] = model_state_dict.pop(layer_name)
        else:
            model_state_dict[layer_name.replace("decoder", "biogpt")] = model_state_dict.pop(layer_name)

    config = BioGptConfig.from_pretrained(pytorch_dump_folder_path)
    model_new = BioGptForCausalLM(config)

    # check that it loads ok
    model_new.load_state_dict(model_state_dict)

    # save
    pytorch_weights_dump_path = os.path.join(pytorch_dump_folder_path, WEIGHTS_NAME)
    print(f"Generating {pytorch_weights_dump_path}")
    torch.save(model_state_dict, pytorch_weights_dump_path)

    print("Conversion is done!")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--biogpt_checkpoint_path",
default=None,
type=str,
required=True,
help=(
"Path to the official PyTorch checkpoint file which is expected to reside in the dump dir with dicts,"
" bpecodes, etc."
),
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
    args = parser.parse_args()
convert_biogpt_checkpoint_to_pytorch(args.biogpt_checkpoint_path, args.pytorch_dump_folder_path)
"""simple docstring"""
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_LIST,
OpenAIGPTConfig,
OpenAIGPTDoubleHeadsModel,
OpenAIGPTForSequenceClassification,
OpenAIGPTLMHeadModel,
OpenAIGPTModel,
)
class OpenAIGPTModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
        self.pad_token_id = self.vocab_size - 1
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = OpenAIGPTConfig(
            vocab_size=self.vocab_size,
            n_embd=self.hidden_size,
            n_layer=self.num_hidden_layers,
            n_head=self.num_attention_heads,
            n_positions=self.max_position_embeddings,
            pad_token_id=self.pad_token_id,
        )
        head_mask = ids_tensor([self.num_hidden_layers, self.num_attention_heads], 2)

        return (
            config,
            input_ids,
            head_mask,
            token_type_ids,
            sequence_labels,
            token_labels,
            choice_labels,
        )
    def create_and_check_openai_gpt_model(self, config, input_ids, head_mask, token_type_ids, *args):
        model = OpenAIGPTModel(config=config)
        model.to(torch_device)
        model.eval()

        result = model(input_ids, token_type_ids=token_type_ids, head_mask=head_mask)
        result = model(input_ids, token_type_ids=token_type_ids)
        result = model(input_ids)

        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_lm_head_model(self, config, input_ids, head_mask, token_type_ids, *args):
        model = OpenAIGPTLMHeadModel(config)
        model.to(torch_device)
        model.eval()

        result = model(input_ids, token_type_ids=token_type_ids, labels=input_ids)
        self.parent.assertEqual(result.loss.shape, ())
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_double_lm_head_model(self, config, input_ids, head_mask, token_type_ids, *args):
        model = OpenAIGPTDoubleHeadsModel(config)
        model.to(torch_device)
        model.eval()

        result = model(input_ids, token_type_ids=token_type_ids, labels=input_ids)
        self.parent.assertEqual(result.loss.shape, ())
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_openai_gpt_for_sequence_classification(
        self, config, input_ids, head_mask, token_type_ids, *args
    ):
        config.num_labels = self.num_labels
        model = OpenAIGPTForSequenceClassification(config)
        model.to(torch_device)
        model.eval()

        sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
        result = model(input_ids, token_type_ids=token_type_ids, labels=sequence_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            head_mask,
            token_type_ids,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {
            "input_ids": input_ids,
            "token_type_ids": token_type_ids,
            "head_mask": head_mask,
        }

        return config, inputs_dict
@require_torch
class OpenAIGPTModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (OpenAIGPTModel, OpenAIGPTLMHeadModel, OpenAIGPTDoubleHeadsModel, OpenAIGPTForSequenceClassification)
        if is_torch_available()
        else ()
    )
    all_generative_model_classes = (
        (OpenAIGPTLMHeadModel,) if is_torch_available() else ()
    )  # TODO (PVP): Add Double HeadsModel when generate() function is changed accordingly
    pipeline_model_mapping = (
        {
            "feature-extraction": OpenAIGPTModel,
            "text-classification": OpenAIGPTForSequenceClassification,
            "text-generation": OpenAIGPTLMHeadModel,
            "zero-shot": OpenAIGPTForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )

    def is_pipeline_test_to_skip(
        self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name
    ):
        if pipeline_test_casse_name == "ZeroShotClassificationPipelineTests":
            # Get `tokenizer does not have a padding token` error for both fast/slow tokenizers.
            # `OpenAIGPTConfig` was never used in pipeline tests, either because of a missing checkpoint or because a
            # tiny config could not be created.
            return True

        return False
    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)

        if return_labels:
            if model_class.__name__ == "OpenAIGPTDoubleHeadsModel":
                inputs_dict["labels"] = torch.zeros(
                    (self.model_tester.batch_size, self.model_tester.num_choices, self.model_tester.seq_length),
                    dtype=torch.long,
                    device=torch_device,
                )
                inputs_dict["input_ids"] = inputs_dict["labels"]
                inputs_dict["token_type_ids"] = inputs_dict["labels"]
                inputs_dict["mc_token_ids"] = torch.zeros(
                    (self.model_tester.batch_size, self.model_tester.num_choices),
                    dtype=torch.long,
                    device=torch_device,
                )
                inputs_dict["mc_labels"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )
        return inputs_dict
    def setUp(self):
        self.model_tester = OpenAIGPTModelTester(self)
        self.config_tester = ConfigTester(self, config_class=OpenAIGPTConfig, n_embd=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_openai_gpt_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_openai_gpt_model(*config_and_inputs)

    def test_openai_gpt_lm_head_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_lm_head_model(*config_and_inputs)

    def test_openai_gpt_double_lm_head_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_double_lm_head_model(*config_and_inputs)

    def test_openai_gpt_classification_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_openai_gpt_for_sequence_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = OpenAIGPTModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
@require_torch
class OPENAIGPTModelLanguageGenerationTest(unittest.TestCase):
    @slow
    def test_lm_generate_openai_gpt(self):
        model = OpenAIGPTLMHeadModel.from_pretrained("openai-gpt")
        model.to(torch_device)
        input_ids = torch.tensor([[481, 4735, 544]], dtype=torch.long, device=torch_device)  # the president is
        expected_output_ids = [
            481, 4735, 544, 246, 963, 870, 762, 239, 244, 40477, 244, 249, 719, 881, 487, 544, 240, 244, 603, 481,
        ]  # the president is a very good man. " \n " i'm sure he is, " said the

        output_ids = model.generate(input_ids, do_sample=False)
        self.assertListEqual(output_ids[0].tolist(), expected_output_ids)
from collections import OrderedDict
from ...utils import logging
from .auto_factory import _BaseAutoModelClass, _LazyAutoMapping, auto_class_update
from .configuration_auto import CONFIG_MAPPING_NAMES
logger = logging.get_logger(__name__)


FLAX_MODEL_MAPPING_NAMES = OrderedDict(
[
# Base model mapping
("""albert""", """FlaxAlbertModel"""),
("""bart""", """FlaxBartModel"""),
("""beit""", """FlaxBeitModel"""),
("""bert""", """FlaxBertModel"""),
("""big_bird""", """FlaxBigBirdModel"""),
("""blenderbot""", """FlaxBlenderbotModel"""),
("""blenderbot-small""", """FlaxBlenderbotSmallModel"""),
("""clip""", """FlaxCLIPModel"""),
("""distilbert""", """FlaxDistilBertModel"""),
("""electra""", """FlaxElectraModel"""),
("""gpt-sw3""", """FlaxGPT2Model"""),
("""gpt2""", """FlaxGPT2Model"""),
("""gpt_neo""", """FlaxGPTNeoModel"""),
("""gptj""", """FlaxGPTJModel"""),
("""longt5""", """FlaxLongT5Model"""),
("""marian""", """FlaxMarianModel"""),
("""mbart""", """FlaxMBartModel"""),
("""mt5""", """FlaxMT5Model"""),
("""opt""", """FlaxOPTModel"""),
("""pegasus""", """FlaxPegasusModel"""),
("""regnet""", """FlaxRegNetModel"""),
("""resnet""", """FlaxResNetModel"""),
("""roberta""", """FlaxRobertaModel"""),
("""roberta-prelayernorm""", """FlaxRobertaPreLayerNormModel"""),
("""roformer""", """FlaxRoFormerModel"""),
("""t5""", """FlaxT5Model"""),
("""vision-text-dual-encoder""", """FlaxVisionTextDualEncoderModel"""),
("""vit""", """FlaxViTModel"""),
("""wav2vec2""", """FlaxWav2Vec2Model"""),
("""whisper""", """FlaxWhisperModel"""),
("""xglm""", """FlaxXGLMModel"""),
("""xlm-roberta""", """FlaxXLMRobertaModel"""),
]
)
FLAX_MODEL_FOR_PRETRAINING_MAPPING_NAMES = OrderedDict(
[
# Model for pre-training mapping
("""albert""", """FlaxAlbertForPreTraining"""),
("""bart""", """FlaxBartForConditionalGeneration"""),
("""bert""", """FlaxBertForPreTraining"""),
("""big_bird""", """FlaxBigBirdForPreTraining"""),
("""electra""", """FlaxElectraForPreTraining"""),
("""longt5""", """FlaxLongT5ForConditionalGeneration"""),
("""mbart""", """FlaxMBartForConditionalGeneration"""),
("""mt5""", """FlaxMT5ForConditionalGeneration"""),
("""roberta""", """FlaxRobertaForMaskedLM"""),
("""roberta-prelayernorm""", """FlaxRobertaPreLayerNormForMaskedLM"""),
("""roformer""", """FlaxRoFormerForMaskedLM"""),
("""t5""", """FlaxT5ForConditionalGeneration"""),
("""wav2vec2""", """FlaxWav2Vec2ForPreTraining"""),
("""whisper""", """FlaxWhisperForConditionalGeneration"""),
("""xlm-roberta""", """FlaxXLMRobertaForMaskedLM"""),
]
)
FLAX_MODEL_FOR_MASKED_LM_MAPPING_NAMES = OrderedDict(
[
# Model for Masked LM mapping
("""albert""", """FlaxAlbertForMaskedLM"""),
("""bart""", """FlaxBartForConditionalGeneration"""),
("""bert""", """FlaxBertForMaskedLM"""),
("""big_bird""", """FlaxBigBirdForMaskedLM"""),
("""distilbert""", """FlaxDistilBertForMaskedLM"""),
("""electra""", """FlaxElectraForMaskedLM"""),
("""mbart""", """FlaxMBartForConditionalGeneration"""),
("""roberta""", """FlaxRobertaForMaskedLM"""),
("""roberta-prelayernorm""", """FlaxRobertaPreLayerNormForMaskedLM"""),
("""roformer""", """FlaxRoFormerForMaskedLM"""),
("""xlm-roberta""", """FlaxXLMRobertaForMaskedLM"""),
]
)
FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES = OrderedDict(
[
# Model for Seq2Seq Causal LM mapping
("""bart""", """FlaxBartForConditionalGeneration"""),
("""blenderbot""", """FlaxBlenderbotForConditionalGeneration"""),
("""blenderbot-small""", """FlaxBlenderbotSmallForConditionalGeneration"""),
("""encoder-decoder""", """FlaxEncoderDecoderModel"""),
("""longt5""", """FlaxLongT5ForConditionalGeneration"""),
("""marian""", """FlaxMarianMTModel"""),
("""mbart""", """FlaxMBartForConditionalGeneration"""),
("""mt5""", """FlaxMT5ForConditionalGeneration"""),
("""pegasus""", """FlaxPegasusForConditionalGeneration"""),
("""t5""", """FlaxT5ForConditionalGeneration"""),
]
)
FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES = OrderedDict(
    [
        # Model for Image classification mapping
("""beit""", """FlaxBeitForImageClassification"""),
("""regnet""", """FlaxRegNetForImageClassification"""),
("""resnet""", """FlaxResNetForImageClassification"""),
("""vit""", """FlaxViTForImageClassification"""),
]
)
FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING_NAMES = OrderedDict(
[
("""vision-encoder-decoder""", """FlaxVisionEncoderDecoderModel"""),
]
)
FLAX_MODEL_FOR_CAUSAL_LM_MAPPING_NAMES = OrderedDict(
[
# Model for Causal LM mapping
("""bart""", """FlaxBartForCausalLM"""),
("""bert""", """FlaxBertForCausalLM"""),
("""big_bird""", """FlaxBigBirdForCausalLM"""),
("""electra""", """FlaxElectraForCausalLM"""),
("""gpt-sw3""", """FlaxGPT2LMHeadModel"""),
("""gpt2""", """FlaxGPT2LMHeadModel"""),
("""gpt_neo""", """FlaxGPTNeoForCausalLM"""),
("""gptj""", """FlaxGPTJForCausalLM"""),
("""opt""", """FlaxOPTForCausalLM"""),
("""roberta""", """FlaxRobertaForCausalLM"""),
("""roberta-prelayernorm""", """FlaxRobertaPreLayerNormForCausalLM"""),
("""xglm""", """FlaxXGLMForCausalLM"""),
("""xlm-roberta""", """FlaxXLMRobertaForCausalLM"""),
]
)
FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES = OrderedDict(
[
# Model for Sequence Classification mapping
("""albert""", """FlaxAlbertForSequenceClassification"""),
("""bart""", """FlaxBartForSequenceClassification"""),
("""bert""", """FlaxBertForSequenceClassification"""),
("""big_bird""", """FlaxBigBirdForSequenceClassification"""),
("""distilbert""", """FlaxDistilBertForSequenceClassification"""),
("""electra""", """FlaxElectraForSequenceClassification"""),
("""mbart""", """FlaxMBartForSequenceClassification"""),
("""roberta""", """FlaxRobertaForSequenceClassification"""),
("""roberta-prelayernorm""", """FlaxRobertaPreLayerNormForSequenceClassification"""),
("""roformer""", """FlaxRoFormerForSequenceClassification"""),
("""xlm-roberta""", """FlaxXLMRobertaForSequenceClassification"""),
]
)
FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES = OrderedDict(
[
# Model for Question Answering mapping
("""albert""", """FlaxAlbertForQuestionAnswering"""),
("""bart""", """FlaxBartForQuestionAnswering"""),
("""bert""", """FlaxBertForQuestionAnswering"""),
("""big_bird""", """FlaxBigBirdForQuestionAnswering"""),
("""distilbert""", """FlaxDistilBertForQuestionAnswering"""),
("""electra""", """FlaxElectraForQuestionAnswering"""),
("""mbart""", """FlaxMBartForQuestionAnswering"""),
("""roberta""", """FlaxRobertaForQuestionAnswering"""),
("""roberta-prelayernorm""", """FlaxRobertaPreLayerNormForQuestionAnswering"""),
("""roformer""", """FlaxRoFormerForQuestionAnswering"""),
("""xlm-roberta""", """FlaxXLMRobertaForQuestionAnswering"""),
]
)
FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES = OrderedDict(
[
# Model for Token Classification mapping
("""albert""", """FlaxAlbertForTokenClassification"""),
("""bert""", """FlaxBertForTokenClassification"""),
("""big_bird""", """FlaxBigBirdForTokenClassification"""),
("""distilbert""", """FlaxDistilBertForTokenClassification"""),
("""electra""", """FlaxElectraForTokenClassification"""),
("""roberta""", """FlaxRobertaForTokenClassification"""),
("""roberta-prelayernorm""", """FlaxRobertaPreLayerNormForTokenClassification"""),
("""roformer""", """FlaxRoFormerForTokenClassification"""),
("""xlm-roberta""", """FlaxXLMRobertaForTokenClassification"""),
]
)
FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES = OrderedDict(
[
# Model for Multiple Choice mapping
("""albert""", """FlaxAlbertForMultipleChoice"""),
("""bert""", """FlaxBertForMultipleChoice"""),
("""big_bird""", """FlaxBigBirdForMultipleChoice"""),
("""distilbert""", """FlaxDistilBertForMultipleChoice"""),
("""electra""", """FlaxElectraForMultipleChoice"""),
("""roberta""", """FlaxRobertaForMultipleChoice"""),
("""roberta-prelayernorm""", """FlaxRobertaPreLayerNormForMultipleChoice"""),
("""roformer""", """FlaxRoFormerForMultipleChoice"""),
("""xlm-roberta""", """FlaxXLMRobertaForMultipleChoice"""),
]
)
FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING_NAMES = OrderedDict(
[
("""bert""", """FlaxBertForNextSentencePrediction"""),
]
)
FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING_NAMES = OrderedDict(
[
("""speech-encoder-decoder""", """FlaxSpeechEncoderDecoderModel"""),
("""whisper""", """FlaxWhisperForConditionalGeneration"""),
]
)
FLAX_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES = OrderedDict(
[
("""whisper""", """FlaxWhisperForAudioClassification"""),
]
)
FLAX_MODEL_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_MAPPING_NAMES)
FLAX_MODEL_FOR_PRETRAINING_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_PRETRAINING_MAPPING_NAMES)
FLAX_MODEL_FOR_MASKED_LM_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_MASKED_LM_MAPPING_NAMES)
FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES
)
FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES
)
FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING_NAMES)
FLAX_MODEL_FOR_CAUSAL_LM_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_CAUSAL_LM_MAPPING_NAMES)
FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES
)
FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES
)
FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES
)
FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES
)
FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING_NAMES
)
FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING_NAMES
)
FLAX_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES
)
class FlaxAutoModel(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_MAPPING


FlaxAutoModel = auto_class_update(FlaxAutoModel)


class FlaxAutoModelForPreTraining(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_PRETRAINING_MAPPING


FlaxAutoModelForPreTraining = auto_class_update(FlaxAutoModelForPreTraining, head_doc="pretraining")


class FlaxAutoModelForCausalLM(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_CAUSAL_LM_MAPPING


FlaxAutoModelForCausalLM = auto_class_update(FlaxAutoModelForCausalLM, head_doc="causal language modeling")


class FlaxAutoModelForMaskedLM(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_MASKED_LM_MAPPING


FlaxAutoModelForMaskedLM = auto_class_update(FlaxAutoModelForMaskedLM, head_doc="masked language modeling")


class FlaxAutoModelForSeq2SeqLM(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING


FlaxAutoModelForSeq2SeqLM = auto_class_update(
    FlaxAutoModelForSeq2SeqLM, head_doc="sequence-to-sequence language modeling", checkpoint_for_example="t5-base"
)


class FlaxAutoModelForSequenceClassification(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING


FlaxAutoModelForSequenceClassification = auto_class_update(
    FlaxAutoModelForSequenceClassification, head_doc="sequence classification"
)


class FlaxAutoModelForQuestionAnswering(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING


FlaxAutoModelForQuestionAnswering = auto_class_update(FlaxAutoModelForQuestionAnswering, head_doc="question answering")


class FlaxAutoModelForTokenClassification(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING


FlaxAutoModelForTokenClassification = auto_class_update(
    FlaxAutoModelForTokenClassification, head_doc="token classification"
)


class FlaxAutoModelForMultipleChoice(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING


FlaxAutoModelForMultipleChoice = auto_class_update(FlaxAutoModelForMultipleChoice, head_doc="multiple choice")


class FlaxAutoModelForNextSentencePrediction(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING


FlaxAutoModelForNextSentencePrediction = auto_class_update(
    FlaxAutoModelForNextSentencePrediction, head_doc="next sentence prediction"
)


class FlaxAutoModelForImageClassification(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING


FlaxAutoModelForImageClassification = auto_class_update(
    FlaxAutoModelForImageClassification, head_doc="image classification"
)


class FlaxAutoModelForVision2Seq(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING


FlaxAutoModelForVision2Seq = auto_class_update(FlaxAutoModelForVision2Seq, head_doc="vision-to-text modeling")


class FlaxAutoModelForSpeechSeq2Seq(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING


FlaxAutoModelForSpeechSeq2Seq = auto_class_update(
    FlaxAutoModelForSpeechSeq2Seq, head_doc="sequence-to-sequence speech-to-text modeling"
)
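
A usage sketch for the auto classes above (not part of the original module), assuming Flax is installed and the "bert-base-cased" checkpoint is reachable; FLAX_MODEL_MAPPING resolves a BERT config to FlaxBertModel.

from transformers import FlaxAutoModel

model = FlaxAutoModel.from_pretrained("bert-base-cased")
print(type(model).__name__)  # FlaxBertModel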
"""simple docstring"""
from typing import Dict, List, Optional, Tuple, Union
import torch
from ...models import AutoencoderKL, Transformer2DModel
from ...schedulers import KarrasDiffusionSchedulers
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
class DiTPipeline(DiffusionPipeline):
    def __init__(
        self,
        transformer: Transformer2DModel,
        vae: AutoencoderKL,
        scheduler: KarrasDiffusionSchedulers,
        id2label: Optional[Dict[int, str]] = None,
    ):
        super().__init__()
        self.register_modules(transformer=transformer, vae=vae, scheduler=scheduler)

        # create a imagenet -> id dictionary for easier use
        self.labels = {}
        if id2label is not None:
            for key, value in id2label.items():
                for label in value.split(","):
                    self.labels[label.lstrip().rstrip()] = int(key)
            self.labels = dict(sorted(self.labels.items()))

    def get_label_ids(self, label: Union[str, List[str]]) -> List[int]:
        if not isinstance(label, list):
            label = list(label)
        for l in label:
            if l not in self.labels:
                raise ValueError(
                    f"{l} does not exist. Please make sure to select one of the following labels: \n {self.labels}."
                )
        return [self.labels[l] for l in label]

    @torch.no_grad()
    def __call__(
        self,
        class_labels: List[int],
        guidance_scale: float = 4.0,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        num_inference_steps: int = 50,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
    ):
        batch_size = len(class_labels)
        latent_size = self.transformer.config.sample_size
        latent_channels = self.transformer.config.in_channels

        latents = randn_tensor(
            shape=(batch_size, latent_channels, latent_size, latent_size),
            generator=generator,
            device=self.device,
            dtype=self.transformer.dtype,
        )
        latent_model_input = torch.cat([latents] * 2) if guidance_scale > 1 else latents

        class_labels = torch.tensor(class_labels, device=self.device).reshape(-1)
        class_null = torch.tensor([1000] * batch_size, device=self.device)
        class_labels_input = torch.cat([class_labels, class_null], 0) if guidance_scale > 1 else class_labels

        # set step values
        self.scheduler.set_timesteps(num_inference_steps)

        for t in self.progress_bar(self.scheduler.timesteps):
            if guidance_scale > 1:
                half = latent_model_input[: len(latent_model_input) // 2]
                latent_model_input = torch.cat([half, half], dim=0)
            latent_model_input = self.scheduler.scale_model_input(latent_model_input, t)

            timesteps = t
            if not torch.is_tensor(timesteps):
                # TODO: this requires sync between CPU and GPU. So try to pass timesteps as tensors if you can
                # This would be a good case for the `match` statement (Python 3.10+)
                is_mps = latent_model_input.device.type == "mps"
                if isinstance(timesteps, float):
                    dtype = torch.float32 if is_mps else torch.float64
                else:
                    dtype = torch.int32 if is_mps else torch.int64
                timesteps = torch.tensor([timesteps], dtype=dtype, device=latent_model_input.device)
            elif len(timesteps.shape) == 0:
                timesteps = timesteps[None].to(latent_model_input.device)
            # broadcast to batch dimension in a way that's compatible with ONNX/Core ML
            timesteps = timesteps.expand(latent_model_input.shape[0])
            # predict noise model_output
            noise_pred = self.transformer(
                latent_model_input, timestep=timesteps, class_labels=class_labels_input
            ).sample

            # perform guidance
            if guidance_scale > 1:
                eps, rest = noise_pred[:, :latent_channels], noise_pred[:, latent_channels:]
                cond_eps, uncond_eps = torch.split(eps, len(eps) // 2, dim=0)

                half_eps = uncond_eps + guidance_scale * (cond_eps - uncond_eps)
                eps = torch.cat([half_eps, half_eps], dim=0)

                noise_pred = torch.cat([eps, rest], dim=1)

            # learned sigma
            if self.transformer.config.out_channels // 2 == latent_channels:
                model_output, _ = torch.split(noise_pred, latent_channels, dim=1)
            else:
                model_output = noise_pred

            # compute previous image: x_t -> x_t-1
            latent_model_input = self.scheduler.step(model_output, t, latent_model_input).prev_sample

        if guidance_scale > 1:
            latents, _ = latent_model_input.chunk(2, dim=0)
        else:
            latents = latent_model_input

        latents = 1 / self.vae.config.scaling_factor * latents
        samples = self.vae.decode(latents).sample

        samples = (samples / 2 + 0.5).clamp(0, 1)

        # we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
        samples = samples.cpu().permute(0, 2, 3, 1).float().numpy()

        if output_type == "pil":
            samples = self.numpy_to_pil(samples)

        if not return_dict:
            return (samples,)

        return ImagePipelineOutput(images=samples)
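
A hypothetical end-to-end sketch for the pipeline above; the checkpoint id, label string, and output path are illustrative, and a CUDA device is assumed for the half-precision weights.

import torch
from diffusers import DiTPipeline

pipe = DiTPipeline.from_pretrained("facebook/DiT-XL-2-256", torch_dtype=torch.float16)
pipe = pipe.to("cuda")

class_ids = pipe.get_label_ids(["white shark"])  # map ImageNet label text -> class ids
image = pipe(class_labels=class_ids, num_inference_steps=25).images[0]
image.save("shark.png")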
"""simple docstring"""
import argparse
import os
import torch
from transformers.utils import WEIGHTS_NAME
DIALOGPT_MODELS = ["small", "medium", "large"]

OLD_KEY = "lm_head.decoder.weight"
NEW_KEY = "lm_head.weight"


def convert_dialogpt_checkpoint(checkpoint_path: str, pytorch_dump_folder_path: str):
    d = torch.load(checkpoint_path)
    d[NEW_KEY] = d.pop(OLD_KEY)
    os.makedirs(pytorch_dump_folder_path, exist_ok=True)
    torch.save(d, os.path.join(pytorch_dump_folder_path, WEIGHTS_NAME))


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--dialogpt_path", default=".", type=str)
    args = parser.parse_args()
    for MODEL in DIALOGPT_MODELS:
        checkpoint_path = os.path.join(args.dialogpt_path, f"{MODEL}_ft.pkl")
        pytorch_dump_folder_path = f"./DialoGPT-{MODEL}"
        convert_dialogpt_checkpoint(
            checkpoint_path,
            pytorch_dump_folder_path,
        )
"""simple docstring"""
import copy
import os
import cv2
import numpy as np
from matplotlib import pyplot as plt
class ConstantStretch:
    def __init__(self):
        self.img = ""
        self.original_image = ""
        self.last_list = []
        self.rem = 0
        self.L = 256
        self.sk = 0
        self.k = 0
        self.number_of_rows = 0
        self.number_of_cols = 0

    def stretch(self, input_image):
        self.img = cv2.imread(input_image, 0)
        self.original_image = copy.deepcopy(self.img)
        x, _, _ = plt.hist(self.img.ravel(), 256, [0, 256], label="x")
        self.k = np.sum(x)
        for i in range(len(x)):
            prk = x[i] / self.k
            self.sk += prk
            last = (self.L - 1) * self.sk
            if self.rem != 0:
                self.rem = int(last % last)
            last = int(last + 1 if self.rem >= 0.5 else last)
            self.last_list.append(last)
        self.number_of_rows = int(np.ma.count(self.img) / self.img[1].size)
        self.number_of_cols = self.img[1].size
        for i in range(self.number_of_cols):
            for j in range(self.number_of_rows):
                num = self.img[j][i]
                if num != self.last_list[num]:
                    self.img[j][i] = self.last_list[num]
        cv2.imwrite("output_data/output.jpg", self.img)

    def plot_histogram(self):
        plt.hist(self.img.ravel(), 256, [0, 256])

    def show_image(self):
        cv2.imshow("Output-Image", self.img)
        cv2.imshow("Input-Image", self.original_image)
        cv2.waitKey(5000)
        cv2.destroyAllWindows()


if __name__ == "__main__":
    file_path = os.path.join(os.path.basename(__file__), "image_data/input.jpg")
    stretcher = ConstantStretch()
    stretcher.stretch(file_path)
    stretcher.plot_histogram()
    stretcher.show_image()
'''simple docstring'''
import importlib
import math
import os
from dataclasses import dataclass
from enum import Enum
from typing import Any, Dict, Optional, Tuple, Union
import flax
import jax.numpy as jnp
from ..utils import BaseOutput
SCHEDULER_CONFIG_NAME = "scheduler_config.json"


class FlaxKarrasDiffusionSchedulers(Enum):
    FlaxDDIMScheduler = 1
    FlaxDDPMScheduler = 2
    FlaxPNDMScheduler = 3
    FlaxLMSDiscreteScheduler = 4
    FlaxDPMSolverMultistepScheduler = 5


@dataclass
class FlaxSchedulerOutput(BaseOutput):
    """Base class for the output of a scheduler's step function."""

    prev_sample: jnp.ndarray


class FlaxSchedulerMixin:
    config_name = SCHEDULER_CONFIG_NAME
    ignore_for_config = ["dtype"]
    _compatibles = []
    has_compatibles = True
    @classmethod
    def from_pretrained(
        cls,
        pretrained_model_name_or_path: Dict[str, Any] = None,
        subfolder: Optional[str] = None,
        return_unused_kwargs=False,
        **kwargs,
    ):
        config, kwargs = cls.load_config(
            pretrained_model_name_or_path=pretrained_model_name_or_path,
            subfolder=subfolder,
            return_unused_kwargs=True,
            **kwargs,
        )
        scheduler, unused_kwargs = cls.from_config(config, return_unused_kwargs=True, **kwargs)

        if hasattr(scheduler, "create_state") and getattr(scheduler, "has_state", False):
            state = scheduler.create_state()

        if return_unused_kwargs:
            return scheduler, state, unused_kwargs

        return scheduler, state

    def save_pretrained(self, save_directory: Union[str, os.PathLike], push_to_hub: bool = False, **kwargs):
        self.save_config(save_directory=save_directory, push_to_hub=push_to_hub, **kwargs)

    @property
    def compatibles(self):
        return self._get_compatibles()

    @classmethod
    def _get_compatibles(cls):
        compatible_classes_str = list(set([cls.__name__] + cls._compatibles))
        diffusers_library = importlib.import_module(__name__.split(".")[0])
        compatible_classes = [
            getattr(diffusers_library, c) for c in compatible_classes_str if hasattr(diffusers_library, c)
        ]
        return compatible_classes
def broadcast_to_shape_from_left(x: jnp.ndarray, shape: Tuple[int]) -> jnp.ndarray:
    assert len(shape) >= x.ndim
    return jnp.broadcast_to(x.reshape(x.shape + (1,) * (len(shape) - x.ndim)), shape)


def betas_for_alpha_bar(num_diffusion_timesteps: int, max_beta=0.999, dtype=jnp.float32) -> jnp.ndarray:
    """
    Create a beta schedule that discretizes the given alpha_t_bar function, which defines the cumulative product
    of (1-beta) over time from t = [0,1].
    """

    def alpha_bar(time_step):
        return math.cos((time_step + 0.008) / 1.008 * math.pi / 2) ** 2

    betas = []
    for i in range(num_diffusion_timesteps):
        t1 = i / num_diffusion_timesteps
        t2 = (i + 1) / num_diffusion_timesteps
        betas.append(min(1 - alpha_bar(t2) / alpha_bar(t1), max_beta))
    return jnp.array(betas, dtype=dtype)
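
# Quick sanity sketch (not in the original module): the cosine schedule
# returned by betas_for_alpha_bar has one beta per timestep, strictly
# positive and capped at max_beta.
if __name__ == "__main__":
    _betas = betas_for_alpha_bar(1000)
    assert _betas.shape == (1000,)
    assert float(_betas.min()) > 0.0 and float(_betas.max()) <= 0.999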
@flax.struct.dataclass
class CommonSchedulerState:
    alphas: jnp.ndarray
    betas: jnp.ndarray
    alphas_cumprod: jnp.ndarray

    @classmethod
    def create(cls, scheduler):
        config = scheduler.config

        if config.trained_betas is not None:
            betas = jnp.asarray(config.trained_betas, dtype=scheduler.dtype)
        elif config.beta_schedule == "linear":
            betas = jnp.linspace(config.beta_start, config.beta_end, config.num_train_timesteps, dtype=scheduler.dtype)
        elif config.beta_schedule == "scaled_linear":
            # this schedule is very specific to the latent diffusion model.
            betas = (
                jnp.linspace(
                    config.beta_start**0.5, config.beta_end**0.5, config.num_train_timesteps, dtype=scheduler.dtype
                )
                ** 2
            )
        elif config.beta_schedule == "squaredcos_cap_v2":
            # Glide cosine schedule
            betas = betas_for_alpha_bar(config.num_train_timesteps, dtype=scheduler.dtype)
        else:
            raise NotImplementedError(
                f"beta_schedule {config.beta_schedule} is not implemented for scheduler {scheduler.__class__.__name__}"
            )

        alphas = 1.0 - betas
        alphas_cumprod = jnp.cumprod(alphas, axis=0)

        return cls(
            alphas=alphas,
            betas=betas,
            alphas_cumprod=alphas_cumprod,
        )
def get_sqrt_alpha_prod(
    state: CommonSchedulerState, original_samples: jnp.ndarray, noise: jnp.ndarray, timesteps: jnp.ndarray
):
    alphas_cumprod = state.alphas_cumprod

    sqrt_alpha_prod = alphas_cumprod[timesteps] ** 0.5
    sqrt_alpha_prod = sqrt_alpha_prod.flatten()
    sqrt_alpha_prod = broadcast_to_shape_from_left(sqrt_alpha_prod, original_samples.shape)

    sqrt_one_minus_alpha_prod = (1 - alphas_cumprod[timesteps]) ** 0.5
    sqrt_one_minus_alpha_prod = sqrt_one_minus_alpha_prod.flatten()
    sqrt_one_minus_alpha_prod = broadcast_to_shape_from_left(sqrt_one_minus_alpha_prod, original_samples.shape)

    return sqrt_alpha_prod, sqrt_one_minus_alpha_prod


def add_noise_common(
    state: CommonSchedulerState, original_samples: jnp.ndarray, noise: jnp.ndarray, timesteps: jnp.ndarray
):
    sqrt_alpha_prod, sqrt_one_minus_alpha_prod = get_sqrt_alpha_prod(state, original_samples, noise, timesteps)
    noisy_samples = sqrt_alpha_prod * original_samples + sqrt_one_minus_alpha_prod * noise
    return noisy_samples


def get_velocity_common(state: CommonSchedulerState, sample: jnp.ndarray, noise: jnp.ndarray, timesteps: jnp.ndarray):
    sqrt_alpha_prod, sqrt_one_minus_alpha_prod = get_sqrt_alpha_prod(state, sample, noise, timesteps)
    velocity = sqrt_alpha_prod * noise - sqrt_one_minus_alpha_prod * sample
    return velocity
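
# Illustrative check (not in the original module): add_noise_common implements
# the DDPM forward process x_t = sqrt(alpha_bar_t) * x_0 + sqrt(1 - alpha_bar_t) * eps,
# so at timestep 0 the noisy sample stays close to the original signal.
# _DemoConfig and _DemoScheduler are hypothetical stand-ins for a real scheduler.
if __name__ == "__main__":
    import jax

    class _DemoConfig:
        trained_betas = None
        beta_schedule = "linear"
        beta_start = 0.0001
        beta_end = 0.02
        num_train_timesteps = 1000

    class _DemoScheduler:
        config = _DemoConfig()
        dtype = jnp.float32

    state = CommonSchedulerState.create(_DemoScheduler())
    x0 = jnp.ones((1, 4))
    eps = jax.random.normal(jax.random.PRNGKey(0), (1, 4))
    print(add_noise_common(state, x0, eps, jnp.array([0])))  # ~ 0.99995 * x0 + 0.01 * eps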
"""simple docstring"""
import shutil
import tempfile
import unittest
from unittest.mock import patch
from transformers import (
DefaultFlowCallback,
IntervalStrategy,
PrinterCallback,
ProgressCallback,
Trainer,
TrainerCallback,
TrainingArguments,
is_torch_available,
)
from transformers.testing_utils import require_torch
if is_torch_available():
from transformers.trainer import DEFAULT_CALLBACKS
from .test_trainer import RegressionDataset, RegressionModelConfig, RegressionPreTrainedModel
class MyTestTrainerCallback(TrainerCallback):
    """A callback that registers the events that go through."""

    def __init__(self):
        self.events = []

    def on_init_end(self, args, state, control, **kwargs):
        self.events.append("on_init_end")

    def on_train_begin(self, args, state, control, **kwargs):
        self.events.append("on_train_begin")

    def on_train_end(self, args, state, control, **kwargs):
        self.events.append("on_train_end")

    def on_epoch_begin(self, args, state, control, **kwargs):
        self.events.append("on_epoch_begin")

    def on_epoch_end(self, args, state, control, **kwargs):
        self.events.append("on_epoch_end")

    def on_step_begin(self, args, state, control, **kwargs):
        self.events.append("on_step_begin")

    def on_step_end(self, args, state, control, **kwargs):
        self.events.append("on_step_end")

    def on_evaluate(self, args, state, control, **kwargs):
        self.events.append("on_evaluate")

    def on_predict(self, args, state, control, **kwargs):
        self.events.append("on_predict")

    def on_save(self, args, state, control, **kwargs):
        self.events.append("on_save")

    def on_log(self, args, state, control, **kwargs):
        self.events.append("on_log")

    def on_prediction_step(self, args, state, control, **kwargs):
        self.events.append("on_prediction_step")
@require_torch
class TrainerCallbackTest(unittest.TestCase):
    def setUp(self):
        self.output_dir = tempfile.mkdtemp()

    def tearDown(self):
        shutil.rmtree(self.output_dir)

    def get_trainer(self, a=0, b=0, train_len=64, eval_len=64, callbacks=None, disable_tqdm=False, **kwargs):
        # disable_tqdm in TrainingArguments has a flaky default since it depends on the level of logging. We make
        # sure it's set to False since the tests later on depend on its value.
        train_dataset = RegressionDataset(length=train_len)
        eval_dataset = RegressionDataset(length=eval_len)
        config = RegressionModelConfig(a=a, b=b)
        model = RegressionPreTrainedModel(config)

        args = TrainingArguments(self.output_dir, disable_tqdm=disable_tqdm, report_to=[], **kwargs)
        return Trainer(
            model,
            args,
            train_dataset=train_dataset,
            eval_dataset=eval_dataset,
            callbacks=callbacks,
        )
    def check_callbacks_equality(self, cbs1, cbs2):
        self.assertEqual(len(cbs1), len(cbs2))

        # Order doesn't matter
        cbs1 = sorted(cbs1, key=lambda cb: cb.__name__ if isinstance(cb, type) else cb.__class__.__name__)
        cbs2 = sorted(cbs2, key=lambda cb: cb.__name__ if isinstance(cb, type) else cb.__class__.__name__)

        for cb1, cb2 in zip(cbs1, cbs2):
            if isinstance(cb1, type) and isinstance(cb2, type):
                self.assertEqual(cb1, cb2)
            elif isinstance(cb1, type) and not isinstance(cb2, type):
                self.assertEqual(cb1, cb2.__class__)
            elif not isinstance(cb1, type) and isinstance(cb2, type):
                self.assertEqual(cb1.__class__, cb2)
            else:
                self.assertEqual(cb1, cb2)
    def get_expected_events(self, trainer):
        expected_events = ["on_init_end", "on_train_begin"]
        step = 0
        train_dl_len = len(trainer.get_eval_dataloader())
        evaluation_events = ["on_prediction_step"] * len(trainer.get_eval_dataloader()) + ["on_log", "on_evaluate"]
        for _ in range(trainer.state.num_train_epochs):
            expected_events.append("on_epoch_begin")
            for _ in range(train_dl_len):
                step += 1
                expected_events += ["on_step_begin", "on_step_end"]
                if step % trainer.args.logging_steps == 0:
                    expected_events.append("on_log")
                if trainer.args.evaluation_strategy == IntervalStrategy.STEPS and step % trainer.args.eval_steps == 0:
                    expected_events += evaluation_events.copy()
                if step % trainer.args.save_steps == 0:
                    expected_events.append("on_save")
            expected_events.append("on_epoch_end")
            if trainer.args.evaluation_strategy == IntervalStrategy.EPOCH:
                expected_events += evaluation_events.copy()
        expected_events += ["on_log", "on_train_end"]
        return expected_events
    def test_init_callback(self):
        trainer = self.get_trainer()
        expected_callbacks = DEFAULT_CALLBACKS.copy() + [ProgressCallback]
        self.check_callbacks_equality(trainer.callback_handler.callbacks, expected_callbacks)

        # Callbacks passed at init are added to the default callbacks
        trainer = self.get_trainer(callbacks=[MyTestTrainerCallback])
        expected_callbacks.append(MyTestTrainerCallback)
        self.check_callbacks_equality(trainer.callback_handler.callbacks, expected_callbacks)

        # TrainingArguments.disable_tqdm controls if we use ProgressCallback or PrinterCallback
        trainer = self.get_trainer(disable_tqdm=True)
        expected_callbacks = DEFAULT_CALLBACKS.copy() + [PrinterCallback]
        self.check_callbacks_equality(trainer.callback_handler.callbacks, expected_callbacks)
    def test_add_remove_callback(self):
        expected_callbacks = DEFAULT_CALLBACKS.copy() + [ProgressCallback]
        trainer = self.get_trainer()

        # We can add, pop, or remove by class name
        trainer.remove_callback(DefaultFlowCallback)
        expected_callbacks.remove(DefaultFlowCallback)
        self.check_callbacks_equality(trainer.callback_handler.callbacks, expected_callbacks)

        trainer = self.get_trainer()
        cb = trainer.pop_callback(DefaultFlowCallback)
        self.assertEqual(cb.__class__, DefaultFlowCallback)
        self.check_callbacks_equality(trainer.callback_handler.callbacks, expected_callbacks)

        trainer.add_callback(DefaultFlowCallback)
        expected_callbacks.insert(0, DefaultFlowCallback)
        self.check_callbacks_equality(trainer.callback_handler.callbacks, expected_callbacks)

        # We can also add, pop, or remove by instance
        trainer = self.get_trainer()
        cb = trainer.callback_handler.callbacks[0]
        trainer.remove_callback(cb)
        expected_callbacks.remove(cb)
        self.check_callbacks_equality(trainer.callback_handler.callbacks, expected_callbacks)

        trainer = self.get_trainer()
        cb1 = trainer.callback_handler.callbacks[0]
        cb2 = trainer.pop_callback(cb1)
        self.assertEqual(cb1, cb2)
        self.check_callbacks_equality(trainer.callback_handler.callbacks, expected_callbacks)

        trainer.add_callback(cb1)
        expected_callbacks.insert(0, cb1)
        self.check_callbacks_equality(trainer.callback_handler.callbacks, expected_callbacks)
    def test_event_flow(self):
        import warnings

        # XXX: for now ignore scatter_gather warnings in this test since it's not relevant to what's being tested
        warnings.simplefilter(action="ignore", category=UserWarning)

        trainer = self.get_trainer(callbacks=[MyTestTrainerCallback])
        trainer.train()
        events = trainer.callback_handler.callbacks[-2].events
        self.assertEqual(events, self.get_expected_events(trainer))

        # Independent log/save/eval
        trainer = self.get_trainer(callbacks=[MyTestTrainerCallback], logging_steps=5)
        trainer.train()
        events = trainer.callback_handler.callbacks[-2].events
        self.assertEqual(events, self.get_expected_events(trainer))

        trainer = self.get_trainer(callbacks=[MyTestTrainerCallback], save_steps=5)
        trainer.train()
        events = trainer.callback_handler.callbacks[-2].events
        self.assertEqual(events, self.get_expected_events(trainer))

        trainer = self.get_trainer(callbacks=[MyTestTrainerCallback], eval_steps=5, evaluation_strategy="steps")
        trainer.train()
        events = trainer.callback_handler.callbacks[-2].events
        self.assertEqual(events, self.get_expected_events(trainer))

        trainer = self.get_trainer(callbacks=[MyTestTrainerCallback], evaluation_strategy="epoch")
        trainer.train()
        events = trainer.callback_handler.callbacks[-2].events
        self.assertEqual(events, self.get_expected_events(trainer))

        # A bit of everything
        trainer = self.get_trainer(
            callbacks=[MyTestTrainerCallback],
            logging_steps=3,
            save_steps=10,
            eval_steps=5,
            evaluation_strategy="steps",
        )
        trainer.train()
        events = trainer.callback_handler.callbacks[-2].events
        self.assertEqual(events, self.get_expected_events(trainer))

        # warning should be emitted for duplicated callbacks
        with patch("transformers.trainer_callback.logger.warning") as warn_mock:
            trainer = self.get_trainer(
                callbacks=[MyTestTrainerCallback, MyTestTrainerCallback],
            )
            assert str(MyTestTrainerCallback) in warn_mock.call_args[0][0]
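Outside the test harness, the same hook mechanism is used for real monitoring. A minimal sketch of a custom callback follows; the class name and print format are ours, while `on_log` and `add_callback` are the actual transformers API.

from transformers import TrainerCallback

class LossPrinterCallback(TrainerCallback):
    """Print the training loss every time the Trainer logs."""

    def on_log(self, args, state, control, logs=None, **kwargs):
        if logs and "loss" in logs:
            print(f"step {state.global_step}: loss={logs['loss']:.4f}")

# wiring it up (either form):
#   trainer = Trainer(model, args, train_dataset=ds, callbacks=[LossPrinterCallback()])
#   trainer.add_callback(LossPrinterCallback())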
| 264 | 0 |
class UnionFind:
    """Disjoint-set (union-find) with union by rank, path compression, and per-set size tracking."""

    def __init__(self, set_counts):
        self.set_counts = set_counts
        self.max_set = max(set_counts)
        num_sets = len(set_counts)
        self.ranks = [1] * num_sets
        self.parents = list(range(num_sets))

    def merge(self, src, dst):
        """Merge the sets containing src and dst; return False if they are already joined."""
        src_parent = self.get_parent(src)
        dst_parent = self.get_parent(dst)
        if src_parent == dst_parent:
            return False

        if self.ranks[dst_parent] >= self.ranks[src_parent]:
            self.set_counts[dst_parent] += self.set_counts[src_parent]
            self.set_counts[src_parent] = 0
            self.parents[src_parent] = dst_parent
            if self.ranks[dst_parent] == self.ranks[src_parent]:
                self.ranks[dst_parent] += 1
            joined_set_size = self.set_counts[dst_parent]
        else:
            self.set_counts[src_parent] += self.set_counts[dst_parent]
            self.set_counts[dst_parent] = 0
            self.parents[dst_parent] = src_parent
            joined_set_size = self.set_counts[src_parent]

        self.max_set = max(self.max_set, joined_set_size)
        return True

    def get_parent(self, disj_set):
        """Find the root of disj_set, compressing the path along the way."""
        if self.parents[disj_set] == disj_set:
            return disj_set
        self.parents[disj_set] = self.get_parent(self.parents[disj_set])  # path compression
        return self.parents[disj_set]
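A quick usage sketch of the class above:

uf = UnionFind([1, 1, 1, 1])   # four singleton sets
uf.merge(0, 1)
uf.merge(2, 3)
uf.merge(0, 2)
print(uf.max_set)                              # 4: everything ended up in one set
print(uf.get_parent(0) == uf.get_parent(3))    # True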
| 71 |
"""simple docstring"""
import numpy as np
def tangent_hyperbolic(vector: np.ndarray) -> np.ndarray:
    """Implement tanh explicitly: tanh(x) = 2 / (1 + e^(-2x)) - 1."""
    return (2 / (1 + np.exp(-2 * vector))) - 1
if __name__ == "__main__":
import doctest
doctest.testmod()
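A one-line sanity check against numpy's built-in implementation (sketch):

x = np.linspace(-3.0, 3.0, 7)
assert np.allclose(tangent_hyperbolic(x), np.tanh(x))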
| 264 | 0 |
"""simple docstring"""
import math
from collections.abc import Callable
def intersection(function: Callable[[float], float], x0: float, x1: float) -> float:
    """Find a root of `function` using the secant method, starting from x0 and x1."""
    x_n: float = x0
    x_n1: float = x1
    while True:
        if x_n == x_n1 or function(x_n1) == function(x_n):
            raise ZeroDivisionError("float division by zero, could not find root")
        x_n2: float = x_n1 - (
            function(x_n1) / ((function(x_n1) - function(x_n)) / (x_n1 - x_n))
        )
        if abs(x_n2 - x_n1) < 10**-5:
            return x_n2
        x_n = x_n1
        x_n1 = x_n2


def f(x: float) -> float:
    return math.pow(x, 3) - (2 * x) - 5
if __name__ == "__main__":
print(intersection(f, 3, 3.5))
| 72 |
"""simple docstring"""
import numpy as np
import torch
from torch.utils.data import Dataset
from utils import logger
class LmSeqsDataset(Dataset):
    """Wrapper around the language-modeling sequences: indexing returns (token_ids, length) pairs."""

    def __init__(self, params, data):
        self.params = params

        self.token_ids = np.array(data)
        self.lengths = np.array([len(t) for t in data])

        self.check()
        self.remove_long_sequences()
        self.remove_empty_sequences()
        self.remove_unknown_sequences()
        self.check()
        self.print_statistics()

    def __getitem__(self, index):
        return (self.token_ids[index], self.lengths[index])

    def __len__(self):
        return len(self.lengths)
    def check(self):
        """Some sanity checks."""
        assert len(self.token_ids) == len(self.lengths)
        assert all(self.lengths[i] == len(self.token_ids[i]) for i in range(len(self.lengths)))
    def remove_long_sequences(self):
        """Sequences that are too long are split into chunks of max_model_input_size."""
        max_len = self.params.max_model_input_size
        indices = self.lengths > max_len
        logger.info(f"Splitting {sum(indices)} too long sequences.")

        def divide_chunks(l, n):
            return [l[i : i + n] for i in range(0, len(l), n)]

        new_tok_ids = []
        new_lengths = []
        if self.params.mlm:
            cls_id, sep_id = self.params.special_tok_ids["cls_token"], self.params.special_tok_ids["sep_token"]
        else:
            cls_id, sep_id = self.params.special_tok_ids["bos_token"], self.params.special_tok_ids["eos_token"]

        for seq_, len_ in zip(self.token_ids, self.lengths):
            assert (seq_[0] == cls_id) and (seq_[-1] == sep_id), seq_
            if len_ <= max_len:
                new_tok_ids.append(seq_)
                new_lengths.append(len_)
            else:
                sub_seqs = []
                for sub_s in divide_chunks(seq_, max_len - 2):
                    if sub_s[0] != cls_id:
                        sub_s = np.insert(sub_s, 0, cls_id)
                    if sub_s[-1] != sep_id:
                        sub_s = np.insert(sub_s, len(sub_s), sep_id)
                    assert len(sub_s) <= max_len
                    assert (sub_s[0] == cls_id) and (sub_s[-1] == sep_id), sub_s
                    sub_seqs.append(sub_s)

                new_tok_ids.extend(sub_seqs)
                new_lengths.extend([len(l) for l in sub_seqs])

        self.token_ids = np.array(new_tok_ids)
        self.lengths = np.array(new_lengths)
    def remove_empty_sequences(self):
        """Too short sequences are simply removed. This could be tuned."""
        init_size = len(self)
        indices = self.lengths > 11
        self.token_ids = self.token_ids[indices]
        self.lengths = self.lengths[indices]
        new_size = len(self)
        logger.info(f"Remove {init_size - new_size} too short (<=11 tokens) sequences.")
    def remove_unknown_sequences(self):
        """Remove sequences with a (too) high level of unknown tokens."""
        if "unk_token" not in self.params.special_tok_ids:
            return
        else:
            unk_token_id = self.params.special_tok_ids["unk_token"]
        init_size = len(self)
        unk_occs = np.array([np.count_nonzero(a == unk_token_id) for a in self.token_ids])
        indices = (unk_occs / self.lengths) < 0.5
        self.token_ids = self.token_ids[indices]
        self.lengths = self.lengths[indices]
        new_size = len(self)
        logger.info(f"Remove {init_size - new_size} sequences with a high level of unknown tokens (50%).")
    def print_statistics(self):
        """Print some statistics on the corpus. Only the master process."""
        if not self.params.is_master:
            return
        logger.info(f"{len(self)} sequences")
        # data_len = sum(self.lengths)
        # nb_unique_tokens = len(Counter(list(chain(*self.token_ids))))
        # logger.info(f'{data_len} tokens ({nb_unique_tokens} unique)')
        # unk_idx = self.params.special_tok_ids['unk_token']
        # nb_unknown = sum([(t==unk_idx).sum() for t in self.token_ids])
        # logger.info(f'{nb_unknown} unknown tokens (covering {100*nb_unknown/data_len:.2f}% of the data)')
    def batch_sequences(self, batch):
        """Do the padding and transform into torch.tensor."""
        token_ids = [t[0] for t in batch]
        lengths = [t[1] for t in batch]
        assert len(token_ids) == len(lengths)

        # Max for paddings
        max_seq_len_ = max(lengths)

        # Pad token ids
        if self.params.mlm:
            pad_idx = self.params.special_tok_ids["pad_token"]
        else:
            pad_idx = self.params.special_tok_ids["unk_token"]
        tk_ = [list(t.astype(int)) + [pad_idx] * (max_seq_len_ - len(t)) for t in token_ids]
        assert len(tk_) == len(token_ids)
        assert all(len(t) == max_seq_len_ for t in tk_)

        tk_t = torch.tensor(tk_)  # (bs, max_seq_len_)
        lg_t = torch.tensor(lengths)  # (bs)
        return tk_t, lg_t
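The padding logic in batch_sequences is easy to exercise standalone. A minimal sketch of the same pad-to-max collate pattern (the pad index of 0 and the toy batch are made up):

import torch

def pad_collate(batch, pad_idx=0):
    # batch: list of (token_ids, length) pairs, mirroring __getitem__ above
    lengths = [length for _, length in batch]
    max_len = max(lengths)
    padded = [list(t) + [pad_idx] * (max_len - len(t)) for t, _ in batch]
    return torch.tensor(padded), torch.tensor(lengths)

tk_t, lg_t = pad_collate([([5, 6, 7], 3), ([5, 6], 2)])
print(tk_t.shape, lg_t.tolist())  # torch.Size([2, 3]) [3, 2]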
| 264 | 0 |
import copy
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
logger = logging.get_logger(__name__)

CONDITIONAL_DETR_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/conditional-detr-resnet-50": (
        "https://huggingface.co/microsoft/conditional-detr-resnet-50/resolve/main/config.json"
    ),
}
class ConditionalDetrConfig(PretrainedConfig):
    model_type = "conditional_detr"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "hidden_size": "d_model",
        "num_attention_heads": "encoder_attention_heads",
    }
    def __init__(
        self,
        use_timm_backbone=True,
        backbone_config=None,
        num_channels=3,
        num_queries=300,
        encoder_layers=6,
        encoder_ffn_dim=2048,
        encoder_attention_heads=8,
        decoder_layers=6,
        decoder_ffn_dim=2048,
        decoder_attention_heads=8,
        encoder_layerdrop=0.0,
        decoder_layerdrop=0.0,
        is_encoder_decoder=True,
        activation_function="relu",
        d_model=256,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        init_xavier_std=1.0,
        auxiliary_loss=False,
        position_embedding_type="sine",
        backbone="resnet50",
        use_pretrained_backbone=True,
        dilation=False,
        class_cost=2,
        bbox_cost=5,
        giou_cost=2,
        mask_loss_coefficient=1,
        dice_loss_coefficient=1,
        cls_loss_coefficient=2,
        bbox_loss_coefficient=5,
        giou_loss_coefficient=2,
        focal_alpha=0.25,
        **kwargs,
    ):
        if backbone_config is not None and use_timm_backbone:
            raise ValueError("You can't specify both `backbone_config` and `use_timm_backbone`.")

        if not use_timm_backbone:
            if backbone_config is None:
                logger.info("`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.")
                backbone_config = CONFIG_MAPPING["resnet"](out_features=["stage4"])
            elif isinstance(backbone_config, dict):
                backbone_model_type = backbone_config.get("model_type")
                config_class = CONFIG_MAPPING[backbone_model_type]
                backbone_config = config_class.from_dict(backbone_config)

        self.use_timm_backbone = use_timm_backbone
        self.backbone_config = backbone_config
        self.num_channels = num_channels
        self.num_queries = num_queries
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.init_xavier_std = init_xavier_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.num_hidden_layers = encoder_layers
        self.auxiliary_loss = auxiliary_loss
        self.position_embedding_type = position_embedding_type
        self.backbone = backbone
        self.use_pretrained_backbone = use_pretrained_backbone
        self.dilation = dilation
        # Hungarian matcher
        self.class_cost = class_cost
        self.bbox_cost = bbox_cost
        self.giou_cost = giou_cost
        # Loss coefficients
        self.mask_loss_coefficient = mask_loss_coefficient
        self.dice_loss_coefficient = dice_loss_coefficient
        self.cls_loss_coefficient = cls_loss_coefficient
        self.bbox_loss_coefficient = bbox_loss_coefficient
        self.giou_loss_coefficient = giou_loss_coefficient
        self.focal_alpha = focal_alpha
        super().__init__(is_encoder_decoder=is_encoder_decoder, **kwargs)
    @property
    def num_attention_heads(self) -> int:
        return self.encoder_attention_heads

    @property
    def hidden_size(self) -> int:
        return self.d_model

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        if self.backbone_config is not None:
            output["backbone_config"] = self.backbone_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
class ConditionalDetrOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
                ("pixel_mask", {0: "batch"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-5

    @property
    def default_onnx_opset(self) -> int:
        return 12
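A quick sketch of exercising this config class, assuming a transformers install that ships it:

from transformers import ConditionalDetrConfig

config = ConditionalDetrConfig(num_queries=100, d_model=256)
restored = ConditionalDetrConfig.from_dict(config.to_dict())
assert restored.num_queries == 100
assert config.hidden_size == 256  # attribute_map routes hidden_size to d_model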
| 73 |
"""simple docstring"""
from sympy import diff, lambdify, symbols
from sympy.functions import * # noqa: F403
def __lowercase ( _a , _a , _a = "x" , _a = 10**-10 , _a = 1 , ):
snake_case_ : Any = symbols(_a )
snake_case_ : int = lambdify(_a , _a )
snake_case_ : Optional[Any] = lambdify(_a , diff(_a , _a ) )
snake_case_ : Optional[Any] = starting_point
while True:
if diff_function(_a ) != 0:
snake_case_ : Optional[int] = prev_guess - multiplicity * func(_a ) / diff_function(
_a )
else:
raise ZeroDivisionError('''Could not find root''' ) from None
# Precision is checked by comparing the difference of consecutive guesses
if abs(next_guess - prev_guess ) < precision:
return next_guess
snake_case_ : int = next_guess
# Let's Execute
if __name__ == "__main__":
# Find root of trigonometric function
# Find value of pi
print(f'The root of sin(x) = 0 is {newton_raphson("sin(x)", 2)}')
# Find root of polynomial
# Find fourth Root of 5
print(f'The root of x**4 - 5 = 0 is {newton_raphson("x**4 -5", 0.4 +5j)}')
# Find value of e
print(
'''The root of log(y) - 1 = 0 is ''',
f'{newton_raphson("log(y) - 1", 2, variable="y")}',
)
# Exponential Roots
print(
'''The root of exp(x) - 1 = 0 is''',
f'{newton_raphson("exp(x) - 1", 10, precision=0.005)}',
)
# Find root of cos(x)
print(f'The root of cos(x) = 0 is {newton_raphson("cos(x)", 0)}')
| 264 | 0 |
"""simple docstring"""
import datasets
_CITATION = '''\
@InProceedings{conneau2018xnli,
author = "Conneau, Alexis
and Rinott, Ruty
and Lample, Guillaume
and Williams, Adina
and Bowman, Samuel R.
and Schwenk, Holger
and Stoyanov, Veselin",
title = "XNLI: Evaluating Cross-lingual Sentence Representations",
booktitle = "Proceedings of the 2018 Conference on Empirical Methods
in Natural Language Processing",
year = "2018",
publisher = "Association for Computational Linguistics",
location = "Brussels, Belgium",
}
'''
_DESCRIPTION = '''\
XNLI is a subset of a few thousand examples from MNLI which have been translated
into 14 different languages (some low-ish resource). As with MNLI, the goal is
to predict textual entailment (does sentence A imply/contradict/neither sentence
B) and is a classification task (given two sentences, predict one of three
labels).
'''
_KWARGS_DESCRIPTION = '''
Computes XNLI score which is just simple accuracy.
Args:
predictions: Predicted labels.
references: Ground truth labels.
Returns:
\'accuracy\': accuracy
Examples:
>>> predictions = [0, 1]
>>> references = [0, 1]
>>> xnli_metric = datasets.load_metric("xnli")
>>> results = xnli_metric.compute(predictions=predictions, references=references)
>>> print(results)
{\'accuracy\': 1.0}
'''
def simple_accuracy(preds, labels):
    return (preds == labels).mean()
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class Xnli(datasets.Metric):
    def _info(self):
return datasets.MetricInfo(
description=_DESCRIPTION ,citation=_CITATION ,inputs_description=_KWARGS_DESCRIPTION ,features=datasets.Features(
{
'predictions': datasets.Value('int64' if self.config_name != 'sts-b' else 'float32' ),
'references': datasets.Value('int64' if self.config_name != 'sts-b' else 'float32' ),
} ) ,codebase_urls=[] ,reference_urls=[] ,format='numpy' ,)
    def _compute(self, predictions, references):
        return {"accuracy": simple_accuracy(predictions, references)}
| 74 |
"""simple docstring"""
from __future__ import annotations
def shear_stress(stress: float, tangential_force: float, area: float) -> tuple[str, float]:
if (stress, tangential_force, area).count(0 ) != 1:
raise ValueError('''You cannot supply more or less than 2 values''' )
elif stress < 0:
raise ValueError('''Stress cannot be negative''' )
elif tangential_force < 0:
raise ValueError('''Tangential Force cannot be negative''' )
elif area < 0:
raise ValueError('''Area cannot be negative''' )
elif stress == 0:
return (
"stress",
tangential_force / area,
)
elif tangential_force == 0:
return (
"tangential_force",
stress * area,
)
else:
return (
"area",
tangential_force / stress,
)
if __name__ == "__main__":
import doctest
doctest.testmod()
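Usage sketch: pass zero for the unknown quantity and the function solves for it:

print(shear_stress(stress=25, tangential_force=100, area=0))     # ('area', 4.0)
print(shear_stress(stress=0, tangential_force=1600, area=200))   # ('stress', 8.0)
print(shear_stress(stress=1000, tangential_force=0, area=1200))  # ('tangential_force', 1200000)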
| 264 | 0 |
'''simple docstring'''
def palindromic_string(input_string: str) -> str:
    """
    Manacher's algorithm: find the longest palindromic substring in linear time.
    """
    max_length = 0

    # if input_string is "aba" then new_input_string becomes "a|b|a"
    new_input_string = ""
    output_string = ""

    # append each character + "|" in new_string for range(0, length-1)
    for i in input_string[: len(input_string) - 1]:
        new_input_string += i + "|"
    # append last character
    new_input_string += input_string[-1]

    # we will store the starting and ending of previous furthest ending palindromic
    # substring
    l, r = 0, 0  # noqa: E741

    # length[i] shows the length of palindromic substring with center i
    length = [1 for i in range(len(new_input_string))]

    # for each character in new_string find corresponding palindromic string
    start = 0
    for j in range(len(new_input_string)):
        k = 1 if j > r else min(length[l + r - j] // 2, r - j + 1)
        while (
            j - k >= 0
            and j + k < len(new_input_string)
            and new_input_string[k + j] == new_input_string[j - k]
        ):
            k += 1

        length[j] = 2 * k - 1

        # does this palindrome end after the previously explored end (that is, r)?
        # if yes, update r to the last index of this palindrome
        if j + k - 1 > r:
            l = j - k + 1  # noqa: E741
            r = j + k - 1

        # update max_length and start position
        if max_length < length[j]:
            max_length = length[j]
            start = j

    # create that string
    s = new_input_string[start - max_length // 2 : start + max_length // 2 + 1]
    for i in s:
        if i != "|":
            output_string += i

    return output_string
if __name__ == "__main__":
import doctest
doctest.testmod()
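Usage sketch:

print(palindromic_string("abbbaba"))           # abbba
print(palindromic_string("forgeeksskeegfor"))  # geeksskeeg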
| 75 |
"""simple docstring"""
from functools import lru_cache
@lru_cache
def factorial(num: int) -> int:
if num < 0:
raise ValueError('''Number should not be negative.''' )
return 1 if num in (0, 1) else num * factorial(num - 1 )
if __name__ == "__main__":
import doctest
doctest.testmod()
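Usage sketch:

print(factorial(5))   # 120
print(factorial(10))  # 3628800; repeat calls are served from the lru_cache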
| 264 | 0 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
'configuration_roformer': ['ROFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP', 'RoFormerConfig', 'RoFormerOnnxConfig'],
'tokenization_roformer': ['RoFormerTokenizer'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_roformer_fast"] = ["RoFormerTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_roformer"] = [
'ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
'RoFormerForCausalLM',
'RoFormerForMaskedLM',
'RoFormerForMultipleChoice',
'RoFormerForQuestionAnswering',
'RoFormerForSequenceClassification',
'RoFormerForTokenClassification',
'RoFormerLayer',
'RoFormerModel',
'RoFormerPreTrainedModel',
'load_tf_weights_in_roformer',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_roformer"] = [
'TF_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFRoFormerForCausalLM',
'TFRoFormerForMaskedLM',
'TFRoFormerForMultipleChoice',
'TFRoFormerForQuestionAnswering',
'TFRoFormerForSequenceClassification',
'TFRoFormerForTokenClassification',
'TFRoFormerLayer',
'TFRoFormerModel',
'TFRoFormerPreTrainedModel',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_roformer"] = [
'FLAX_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
'FlaxRoFormerForMaskedLM',
'FlaxRoFormerForMultipleChoice',
'FlaxRoFormerForQuestionAnswering',
'FlaxRoFormerForSequenceClassification',
'FlaxRoFormerForTokenClassification',
'FlaxRoFormerModel',
'FlaxRoFormerPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_roformer import ROFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, RoFormerConfig, RoFormerOnnxConfig
from .tokenization_roformer import RoFormerTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_roformer_fast import RoFormerTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_roformer import (
ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
RoFormerForCausalLM,
RoFormerForMaskedLM,
RoFormerForMultipleChoice,
RoFormerForQuestionAnswering,
RoFormerForSequenceClassification,
RoFormerForTokenClassification,
RoFormerLayer,
RoFormerModel,
RoFormerPreTrainedModel,
load_tf_weights_in_roformer,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_roformer import (
TF_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TFRoFormerForCausalLM,
TFRoFormerForMaskedLM,
TFRoFormerForMultipleChoice,
TFRoFormerForQuestionAnswering,
TFRoFormerForSequenceClassification,
TFRoFormerForTokenClassification,
TFRoFormerLayer,
TFRoFormerModel,
TFRoFormerPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_roformer import (
FLAX_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
FlaxRoFormerForMaskedLM,
FlaxRoFormerForMultipleChoice,
FlaxRoFormerForQuestionAnswering,
FlaxRoFormerForSequenceClassification,
FlaxRoFormerForTokenClassification,
FlaxRoFormerModel,
FlaxRoFormerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 76 |
"""simple docstring"""
import sys
N = (
'''73167176531330624919225119674426574742355349194934'''
'''96983520312774506326239578318016984801869478851843'''
'''85861560789112949495459501737958331952853208805511'''
'''12540698747158523863050715693290963295227443043557'''
'''66896648950445244523161731856403098711121722383113'''
'''62229893423380308135336276614282806444486645238749'''
'''30358907296290491560440772390713810515859307960866'''
'''70172427121883998797908792274921901699720888093776'''
'''65727333001053367881220235421809751254540594752243'''
'''52584907711670556013604839586446706324415722155397'''
'''53697817977846174064955149290862569321978468622482'''
'''83972241375657056057490261407972968652414535100474'''
'''82166370484403199890008895243450658541227588666881'''
'''16427171479924442928230863465674813919123162824586'''
'''17866458359124566529476545682848912883142607690042'''
'''24219022671055626321111109370544217506941658960408'''
'''07198403850962455444362981230987879927244284909188'''
'''84580156166097919133875499200524063689912560717606'''
'''05886116467109405077541002256983155200055935729725'''
'''71636269561882670428252483600823257530420752963450'''
)
def str_eval(s: str) -> int:
    """Return the product of the digits of the string s."""
    product = 1
    for digit in s:
        product *= int(digit)
    return product


def solution(n: str = N) -> int:
    """Find the thirteen adjacent digits in n with the greatest product."""
    largest_product = -sys.maxsize - 1
    substr = n[:13]
    cur_index = 13
    while cur_index < len(n) - 13:
        if int(n[cur_index]) >= int(substr[0]):
            substr = substr[1:] + n[cur_index]
            cur_index += 1
        else:
            largest_product = max(largest_product, str_eval(substr))
            substr = n[cur_index : cur_index + 13]
            cur_index += 13
    return largest_product
if __name__ == "__main__":
print(f'{solution() = }')
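Quick sanity checks (the final value is the published Project Euler 8 answer):

assert str_eval("9989") == 9 * 9 * 8 * 9  # 5832
assert solution() == 23514624000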
| 264 | 0 |
"""simple docstring"""
from __future__ import annotations
import unittest
import numpy as np
from transformers import BlipTextConfig
from transformers.testing_utils import require_tf, slow
from transformers.utils import is_tf_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
if is_tf_available():
import tensorflow as tf
from transformers import TFBlipTextModel
from transformers.models.blip.modeling_tf_blip import TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST
class BlipTextModelTester:
    def __init__(self, parent, batch_size=12, seq_length=7, is_training=True, use_input_mask=True, use_labels=True, vocab_size=99, hidden_size=32, projection_dim=32, num_hidden_layers=2, num_attention_heads=4, intermediate_size=37, dropout=0.1, attention_dropout=0.1, max_position_embeddings=512, initializer_range=0.02, bos_token_id=0, scope=None):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.projection_dim = projection_dim
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.scope = scope
        self.bos_token_id = bos_token_id
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        if input_mask is not None:
            input_mask = input_mask.numpy()
            batch_size, seq_length = input_mask.shape
            rnd_start_indices = np.random.randint(1, seq_length - 1, size=(batch_size,))
            for batch_idx, start_index in enumerate(rnd_start_indices):
                input_mask[batch_idx, :start_index] = 1
                input_mask[batch_idx, start_index:] = 0

        config = self.get_config()

        return config, input_ids, tf.convert_to_tensor(input_mask)
    def get_config(self):
return BlipTextConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , projection_dim=self.projection_dim , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , dropout=self.dropout , attention_dropout=self.attention_dropout , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , bos_token_id=self.bos_token_id , )
    def create_and_check_model(self, config, input_ids, input_mask):
        model = TFBlipTextModel(config=config)
        result = model(input_ids, attention_mask=input_mask, training=False)
        result = model(input_ids, training=False)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, input_mask = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_tf
class BlipTextModelTest(TFModelTesterMixin, unittest.TestCase):
    all_model_classes = (TFBlipTextModel,) if is_tf_available() else ()
    fx_compat = False
    test_pruning = False
    test_head_masking = False
    def setUp(self):
        self.model_tester = BlipTextModelTester(self)
        self.config_tester = ConfigTester(self, config_class=BlipTextConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_training(self):
        pass

    def test_training_gradient_checkpointing(self):
        pass

    @unittest.skip(reason="Blip does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="BlipTextModel has no base class and is not available in MODEL_MAPPING")
    def test_save_load_fast_init_from_base(self):
        pass

    @unittest.skip(reason="BlipTextModel has no base class and is not available in MODEL_MAPPING")
    def test_save_load_fast_init_to_base(self):
        pass

    @slow
    def test_model_from_pretrained(self):
        for model_name in TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFBlipTextModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    def test_pt_tf_model_equivalence(self, allow_missing_keys=True):
        super().test_pt_tf_model_equivalence(allow_missing_keys=allow_missing_keys)
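For experimenting outside the tester, equivalent dummy inputs can be built directly. A standalone sketch; the shapes mirror the tester defaults above:

import numpy as np
import tensorflow as tf

batch_size, seq_length, vocab_size = 12, 7, 99
input_ids = tf.constant(np.random.randint(0, vocab_size, size=(batch_size, seq_length)), dtype=tf.int32)
attention_mask = tf.ones((batch_size, seq_length), dtype=tf.int32)
# a freshly initialised model accepts these directly:
#   model = TFBlipTextModel(BlipTextConfig(vocab_size=vocab_size))
#   outputs = model(input_ids, attention_mask=attention_mask, training=False)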
| 77 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
'''configuration_distilbert''': [
'''DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''DistilBertConfig''',
'''DistilBertOnnxConfig''',
],
'''tokenization_distilbert''': ['''DistilBertTokenizer'''],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_distilbert_fast"] = ["DistilBertTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_distilbert"] = [
'''DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''DistilBertForMaskedLM''',
'''DistilBertForMultipleChoice''',
'''DistilBertForQuestionAnswering''',
'''DistilBertForSequenceClassification''',
'''DistilBertForTokenClassification''',
'''DistilBertModel''',
'''DistilBertPreTrainedModel''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_distilbert"] = [
'''TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFDistilBertForMaskedLM''',
'''TFDistilBertForMultipleChoice''',
'''TFDistilBertForQuestionAnswering''',
'''TFDistilBertForSequenceClassification''',
'''TFDistilBertForTokenClassification''',
'''TFDistilBertMainLayer''',
'''TFDistilBertModel''',
'''TFDistilBertPreTrainedModel''',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_distilbert"] = [
'''FlaxDistilBertForMaskedLM''',
'''FlaxDistilBertForMultipleChoice''',
'''FlaxDistilBertForQuestionAnswering''',
'''FlaxDistilBertForSequenceClassification''',
'''FlaxDistilBertForTokenClassification''',
'''FlaxDistilBertModel''',
'''FlaxDistilBertPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_distilbert import (
DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
DistilBertConfig,
DistilBertOnnxConfig,
)
from .tokenization_distilbert import DistilBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_distilbert_fast import DistilBertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_distilbert import (
DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
DistilBertForMaskedLM,
DistilBertForMultipleChoice,
DistilBertForQuestionAnswering,
DistilBertForSequenceClassification,
DistilBertForTokenClassification,
DistilBertModel,
DistilBertPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_distilbert import (
TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFDistilBertForMaskedLM,
TFDistilBertForMultipleChoice,
TFDistilBertForQuestionAnswering,
TFDistilBertForSequenceClassification,
TFDistilBertForTokenClassification,
TFDistilBertMainLayer,
TFDistilBertModel,
TFDistilBertPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_distilbert import (
FlaxDistilBertForMaskedLM,
FlaxDistilBertForMultipleChoice,
FlaxDistilBertForQuestionAnswering,
FlaxDistilBertForSequenceClassification,
FlaxDistilBertForTokenClassification,
FlaxDistilBertModel,
FlaxDistilBertPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
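The pattern above (and in the RoFormer module earlier) defers heavy imports until first attribute access. A simplified stand-in for transformers' _LazyModule, just to illustrate the mechanism; it is not the real implementation:

import importlib

class SimpleLazyModule:
    """Toy version: resolve attributes by importing the owning submodule on demand."""

    def __init__(self, name, import_structure):
        self._name = name
        self._import_structure = import_structure

    def __getattr__(self, attr):
        for submodule, names in self._import_structure.items():
            if attr in names:
                module = importlib.import_module(f"{self._name}.{submodule}")
                return getattr(module, attr)
        raise AttributeError(attr)

# e.g. SimpleLazyModule("transformers.models.distilbert",
#                       {"configuration_distilbert": ["DistilBertConfig"]}).DistilBertConfig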
| 264 | 0 |
"""simple docstring"""
import inspect
import unittest
from typing import List
import numpy as np
from transformers import EfficientFormerConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFEfficientFormerForImageClassification,
TFEfficientFormerForImageClassificationWithTeacher,
TFEfficientFormerModel,
)
from transformers.models.efficientformer.modeling_tf_efficientformer import (
TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
)
if is_vision_available():
from PIL import Image
from transformers import EfficientFormerImageProcessor
class TFEfficientFormerModelTester:
"""simple docstring"""
def __init__( self :int , lowercase_ :int , lowercase_ :int = 13 , lowercase_ :int = 64 , lowercase_ :int = 2 , lowercase_ :int = 3 , lowercase_ :int = 3 , lowercase_ :bool = True , lowercase_ :bool = True , lowercase_ :int = 1_28 , lowercase_ :Union[str, Any]=[16, 32, 64, 1_28] , lowercase_ :int = 7 , lowercase_ :int = 4 , lowercase_ :int = 37 , lowercase_ :str = "gelu" , lowercase_ :float = 0.1 , lowercase_ :float = 0.1 , lowercase_ :int = 10 , lowercase_ :float = 0.02 , lowercase_ :int = 2 , lowercase_ :int = 1 , lowercase_ :int = 1_28 , lowercase_ :List[int] = [2, 2, 2, 2] , lowercase_ :int = 2 , lowercase_ :int = 2 , ) -> str:
UpperCAmelCase = parent
UpperCAmelCase = batch_size
UpperCAmelCase = image_size
UpperCAmelCase = patch_size
UpperCAmelCase = num_channels
UpperCAmelCase = is_training
UpperCAmelCase = use_labels
UpperCAmelCase = hidden_size
UpperCAmelCase = num_hidden_layers
UpperCAmelCase = num_attention_heads
UpperCAmelCase = intermediate_size
UpperCAmelCase = hidden_act
UpperCAmelCase = hidden_dropout_prob
UpperCAmelCase = attention_probs_dropout_prob
UpperCAmelCase = type_sequence_label_size
UpperCAmelCase = initializer_range
UpperCAmelCase = encoder_stride
UpperCAmelCase = num_attention_outputs
UpperCAmelCase = embed_dim
UpperCAmelCase = embed_dim + 1
UpperCAmelCase = resolution
UpperCAmelCase = depths
UpperCAmelCase = hidden_sizes
UpperCAmelCase = dim
UpperCAmelCase = mlp_expansion_ratio
def UpperCAmelCase__ ( self :List[Any] ) -> List[str]:
UpperCAmelCase = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
UpperCAmelCase = None
if self.use_labels:
UpperCAmelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size )
UpperCAmelCase = self.get_config()
return config, pixel_values, labels
def UpperCAmelCase__ ( self :List[Any] ) -> Optional[int]:
return EfficientFormerConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=lowercase_ , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , resolution=self.resolution , depths=self.depths , hidden_sizes=self.hidden_sizes , dim=self.dim , mlp_expansion_ratio=self.mlp_expansion_ratio , )
def UpperCAmelCase__ ( self :Optional[int] , lowercase_ :Union[str, Any] , lowercase_ :List[str] , lowercase_ :Any ) -> str:
UpperCAmelCase = TFEfficientFormerModel(config=lowercase_ )
UpperCAmelCase = model(lowercase_ , training=lowercase_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def UpperCAmelCase__ ( self :List[Any] , lowercase_ :str , lowercase_ :List[str] , lowercase_ :Optional[int] ) -> int:
UpperCAmelCase = self.type_sequence_label_size
UpperCAmelCase = TFEfficientFormerForImageClassification(lowercase_ )
UpperCAmelCase = model(lowercase_ , labels=lowercase_ , training=lowercase_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
# test greyscale images
UpperCAmelCase = 1
UpperCAmelCase = TFEfficientFormerForImageClassification(lowercase_ )
UpperCAmelCase = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
UpperCAmelCase = model(lowercase_ , labels=lowercase_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def UpperCAmelCase__ ( self :Tuple ) -> str:
UpperCAmelCase = self.prepare_config_and_inputs()
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase = config_and_inputs
UpperCAmelCase = {'pixel_values': pixel_values}
return config, inputs_dict
@require_tf
class TFEfficientFormerModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
"""simple docstring"""
    all_model_classes = (
(
TFEfficientFormerModel,
TFEfficientFormerForImageClassificationWithTeacher,
TFEfficientFormerForImageClassification,
)
if is_tf_available()
else ()
)
    pipeline_model_mapping = (
{
"""feature-extraction""": TFEfficientFormerModel,
"""image-classification""": (
TFEfficientFormerForImageClassification,
TFEfficientFormerForImageClassificationWithTeacher,
),
}
if is_tf_available()
else {}
)
    fx_compat = False
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    test_onnx = False
    def setUp(self):
        self.model_tester = TFEfficientFormerModelTester(self)
        self.config_tester = ConfigTester(
            self, config_class=EfficientFormerConfig, has_text_modality=False, hidden_size=37
        )
    def test_config(self):
        self.config_tester.run_common_tests()
@unittest.skip(reason='EfficientFormer does not use inputs_embeds' )
def UpperCAmelCase__ ( self :Tuple ) -> Dict:
pass
@unittest.skip(reason='EfficientFormer does not support input and output embeddings' )
def UpperCAmelCase__ ( self :Dict ) -> Any:
pass
def UpperCAmelCase__ ( self :Tuple ) -> int:
UpperCAmelCase , UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCAmelCase = model_class(lowercase_ )
UpperCAmelCase = inspect.signature(model.call )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
UpperCAmelCase = [*signature.parameters.keys()]
UpperCAmelCase = ['pixel_values']
self.assertListEqual(arg_names[:1] , lowercase_ )
def UpperCAmelCase__ ( self :Dict ) -> str:
def check_hidden_states_output(lowercase_ :Union[str, Any] , lowercase_ :Optional[Any] , lowercase_ :int ):
UpperCAmelCase = model_class(lowercase_ )
UpperCAmelCase = model(**self._prepare_for_class(lowercase_ , lowercase_ ) , training=lowercase_ )
UpperCAmelCase = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
UpperCAmelCase = getattr(
self.model_tester , 'expected_num_hidden_layers' , self.model_tester.num_hidden_layers + 1 )
self.assertEqual(len(lowercase_ ) , lowercase_ )
if hasattr(self.model_tester , 'encoder_seq_length' ):
UpperCAmelCase = self.model_tester.encoder_seq_length
if hasattr(self.model_tester , 'chunk_length' ) and self.model_tester.chunk_length > 1:
UpperCAmelCase = seq_length * self.model_tester.chunk_length
else:
UpperCAmelCase = self.model_tester.seq_length
self.assertListEqual(
list(hidden_states[-1].shape[-2:] ) , [seq_length, self.model_tester.hidden_size] , )
if config.is_encoder_decoder:
UpperCAmelCase = outputs.decoder_hidden_states
                self.assertIsInstance(lowercase_ , (list, tuple) )
self.assertEqual(len(lowercase_ ) , lowercase_ )
UpperCAmelCase = getattr(self.model_tester , 'seq_length' , lowercase_ )
UpperCAmelCase = getattr(self.model_tester , 'decoder_seq_length' , lowercase_ )
self.assertListEqual(
list(hidden_states[-1].shape[-2:] ) , [decoder_seq_length, self.model_tester.hidden_size] , )
UpperCAmelCase , UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCAmelCase = True
check_hidden_states_output(lowercase_ , lowercase_ , lowercase_ )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
UpperCAmelCase = True
check_hidden_states_output(lowercase_ , lowercase_ , lowercase_ )
def UpperCAmelCase__ ( self :List[Any] , lowercase_ :Union[str, Any] , lowercase_ :int , lowercase_ :Tuple=False ) -> Optional[int]:
UpperCAmelCase = super()._prepare_for_class(lowercase_ , lowercase_ , return_labels=lowercase_ )
if return_labels:
if model_class.__name__ == "TFEfficientFormerForImageClassificationWithTeacher":
del inputs_dict["labels"]
return inputs_dict
def UpperCAmelCase__ ( self :Tuple ) -> Any:
UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowercase_ )
@unittest.skip(reason='EfficientFormer does not implement masked image modeling yet' )
def UpperCAmelCase__ ( self :Tuple ) -> List[Any]:
UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_image_modeling(*lowercase_ )
def UpperCAmelCase__ ( self :Dict ) -> Union[str, Any]:
UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*lowercase_ )
@slow
def UpperCAmelCase__ ( self :Any ) -> Optional[int]:
for model_name in TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
UpperCAmelCase = TFEfficientFormerModel.from_pretrained(lowercase_ )
self.assertIsNotNone(lowercase_ )
def UpperCAmelCase__ ( self :Optional[int] ) -> List[str]:
UpperCAmelCase , UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
UpperCAmelCase = True
UpperCAmelCase = getattr(self.model_tester , 'seq_length' , lowercase_ )
UpperCAmelCase = getattr(self.model_tester , 'encoder_seq_length' , lowercase_ )
UpperCAmelCase = getattr(self.model_tester , 'key_length' , lowercase_ )
UpperCAmelCase = getattr(self.model_tester , 'chunk_length' , lowercase_ )
if chunk_length is not None and hasattr(self.model_tester , 'num_hashes' ):
UpperCAmelCase = encoder_seq_length * self.model_tester.num_hashes
for model_class in self.all_model_classes:
UpperCAmelCase = True
UpperCAmelCase = False
UpperCAmelCase = True
UpperCAmelCase = model_class(lowercase_ )
UpperCAmelCase = model(**self._prepare_for_class(lowercase_ , lowercase_ ) , training=lowercase_ )
UpperCAmelCase = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions
self.assertEqual(len(lowercase_ ) , self.model_tester.num_attention_outputs )
# check that output_attentions also work using config
del inputs_dict["output_attentions"]
UpperCAmelCase = True
UpperCAmelCase = model_class(lowercase_ )
UpperCAmelCase = model(**self._prepare_for_class(lowercase_ , lowercase_ ) , training=lowercase_ )
UpperCAmelCase = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions
self.assertEqual(len(lowercase_ ) , self.model_tester.num_attention_outputs )
if chunk_length is not None:
self.assertListEqual(
list(attentions[0].shape[-4:] ) , [self.model_tester.num_attention_heads, encoder_seq_length, chunk_length, encoder_key_length] , )
else:
self.assertListEqual(
list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, encoder_seq_length, encoder_key_length] , )
def UpperCAmelCase__ ( self :int ) -> List[Any]:
# We use a simplified version of this test for EfficientFormer because it requires training=False
# and Keras refuses to let us force that during functional construction
UpperCAmelCase , UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
# Prepare our model
UpperCAmelCase = model_class(lowercase_ )
# These are maximally general inputs for the model, with multiple None dimensions
# Hopefully this will catch any conditionals that fail for flexible shapes
UpperCAmelCase = {
key: tf.keras.Input(shape=val.shape[1:] , dtype=val.dtype , name=lowercase_ )
for key, val in model.input_signature.items()
if key in model.dummy_inputs
}
UpperCAmelCase = model(lowercase_ )
self.assertTrue(outputs_dict is not None )
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_tf
@require_vision
class A_ ( unittest.TestCase ):
"""simple docstring"""
@cached_property
    def default_image_processor(self):
return (
EfficientFormerImageProcessor.from_pretrained('snap-research/efficientformer-l1-300' )
if is_vision_available()
else None
)
@slow
def UpperCAmelCase__ ( self :str ) -> Optional[Any]:
UpperCAmelCase = TFEfficientFormerForImageClassification.from_pretrained('snap-research/efficientformer-l1-300' )
UpperCAmelCase = self.default_image_processor
UpperCAmelCase = prepare_img()
UpperCAmelCase = image_processor(images=lowercase_ , return_tensors='tf' )
# forward pass
UpperCAmelCase = model(**lowercase_ , training=lowercase_ )
# verify the logits
UpperCAmelCase = tf.TensorShape((1, 10_00) )
self.assertEqual(outputs.logits.shape , lowercase_ )
UpperCAmelCase = tf.constant([-0.0555, 0.4825, -0.0852] )
self.assertTrue(np.allclose(outputs.logits[0, :3] , lowercase_ , atol=1E-4 ) )
@slow
def UpperCAmelCase__ ( self :int ) -> Optional[Any]:
UpperCAmelCase = TFEfficientFormerForImageClassificationWithTeacher.from_pretrained(
'snap-research/efficientformer-l1-300' )
UpperCAmelCase = self.default_image_processor
UpperCAmelCase = prepare_img()
UpperCAmelCase = image_processor(images=lowercase_ , return_tensors='tf' )
# forward pass
UpperCAmelCase = model(**lowercase_ , training=lowercase_ )
# verify the logits
UpperCAmelCase = tf.TensorShape((1, 10_00) )
self.assertEqual(outputs.logits.shape , lowercase_ )
UpperCAmelCase = tf.constant([-0.1312, 0.4353, -1.0499] )
self.assertTrue(np.allclose(outputs.logits[0, :3] , lowercase_ , atol=1E-4 ) )
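Mirroring the integration test above as a standalone inference script. A sketch only: it assumes TF, the vision dependencies, and network access, and the local image path is a placeholder; the checkpoint name is the one the tests use.

import tensorflow as tf
from PIL import Image
from transformers import EfficientFormerImageProcessor, TFEfficientFormerForImageClassification

processor = EfficientFormerImageProcessor.from_pretrained("snap-research/efficientformer-l1-300")
model = TFEfficientFormerForImageClassification.from_pretrained("snap-research/efficientformer-l1-300")

image = Image.open("cat.png")  # placeholder path
inputs = processor(images=image, return_tensors="tf")
logits = model(**inputs, training=False).logits
print(int(tf.argmax(logits, axis=-1)[0]))  # predicted ImageNet class id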
| 78 |
"""simple docstring"""
import argparse
import json
from pathlib import Path
import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from timm.data import resolve_data_config
from timm.data.transforms_factory import create_transform
from transformers import (
BitConfig,
ViTHybridConfig,
ViTHybridForImageClassification,
ViTHybridImageProcessor,
ViTHybridModel,
)
from transformers.image_utils import PILImageResampling
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def create_rename_keys(config, base_model=False):
    rename_keys = []
# fmt: off
# stem:
rename_keys.append(('''cls_token''', '''vit.embeddings.cls_token''') )
rename_keys.append(('''pos_embed''', '''vit.embeddings.position_embeddings''') )
rename_keys.append(('''patch_embed.proj.weight''', '''vit.embeddings.patch_embeddings.projection.weight''') )
rename_keys.append(('''patch_embed.proj.bias''', '''vit.embeddings.patch_embeddings.projection.bias''') )
# backbone
rename_keys.append(('''patch_embed.backbone.stem.conv.weight''', '''vit.embeddings.patch_embeddings.backbone.bit.embedder.convolution.weight''') )
rename_keys.append(('''patch_embed.backbone.stem.norm.weight''', '''vit.embeddings.patch_embeddings.backbone.bit.embedder.norm.weight''') )
rename_keys.append(('''patch_embed.backbone.stem.norm.bias''', '''vit.embeddings.patch_embeddings.backbone.bit.embedder.norm.bias''') )
for stage_idx in range(len(config.backbone_config.depths ) ):
for layer_idx in range(config.backbone_config.depths[stage_idx] ):
rename_keys.append((f"patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.conv1.weight", f"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.conv1.weight") )
rename_keys.append((f"patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm1.weight", f"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm1.weight") )
rename_keys.append((f"patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm1.bias", f"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm1.bias") )
rename_keys.append((f"patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.conv2.weight", f"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.conv2.weight") )
rename_keys.append((f"patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm2.weight", f"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm2.weight") )
rename_keys.append((f"patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm2.bias", f"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm2.bias") )
rename_keys.append((f"patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.conv3.weight", f"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.conv3.weight") )
rename_keys.append((f"patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm3.weight", f"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm3.weight") )
rename_keys.append((f"patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm3.bias", f"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm3.bias") )
rename_keys.append((f"patch_embed.backbone.stages.{stage_idx}.blocks.0.downsample.conv.weight", f"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.0.downsample.conv.weight") )
rename_keys.append((f"patch_embed.backbone.stages.{stage_idx}.blocks.0.downsample.norm.weight", f"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.0.downsample.norm.weight") )
rename_keys.append((f"patch_embed.backbone.stages.{stage_idx}.blocks.0.downsample.norm.bias", f"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.0.downsample.norm.bias") )
# transformer encoder
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((f"blocks.{i}.norm1.weight", f"vit.encoder.layer.{i}.layernorm_before.weight") )
rename_keys.append((f"blocks.{i}.norm1.bias", f"vit.encoder.layer.{i}.layernorm_before.bias") )
rename_keys.append((f"blocks.{i}.attn.proj.weight", f"vit.encoder.layer.{i}.attention.output.dense.weight") )
rename_keys.append((f"blocks.{i}.attn.proj.bias", f"vit.encoder.layer.{i}.attention.output.dense.bias") )
rename_keys.append((f"blocks.{i}.norm2.weight", f"vit.encoder.layer.{i}.layernorm_after.weight") )
rename_keys.append((f"blocks.{i}.norm2.bias", f"vit.encoder.layer.{i}.layernorm_after.bias") )
rename_keys.append((f"blocks.{i}.mlp.fc1.weight", f"vit.encoder.layer.{i}.intermediate.dense.weight") )
rename_keys.append((f"blocks.{i}.mlp.fc1.bias", f"vit.encoder.layer.{i}.intermediate.dense.bias") )
rename_keys.append((f"blocks.{i}.mlp.fc2.weight", f"vit.encoder.layer.{i}.output.dense.weight") )
rename_keys.append((f"blocks.{i}.mlp.fc2.bias", f"vit.encoder.layer.{i}.output.dense.bias") )
if base_model:
# layernorm + pooler
rename_keys.extend(
[
('''norm.weight''', '''layernorm.weight'''),
('''norm.bias''', '''layernorm.bias'''),
('''pre_logits.fc.weight''', '''pooler.dense.weight'''),
('''pre_logits.fc.bias''', '''pooler.dense.bias'''),
] )
# if just the base model, we should remove "vit" from all keys that start with "vit"
        rename_keys = [(pair[0], pair[1][4:]) if pair[1].startswith('''vit''' ) else pair for pair in rename_keys]
else:
# layernorm + classification head
rename_keys.extend(
[
('''norm.weight''', '''vit.layernorm.weight'''),
('''norm.bias''', '''vit.layernorm.bias'''),
('''head.weight''', '''classifier.weight'''),
('''head.bias''', '''classifier.bias'''),
] )
# fmt: on
return rename_keys
def read_in_q_k_v( state_dict , config , base_model=False ):
    for i in range(config.num_hidden_layers ):
        if base_model:
            prefix = ""
        else:
            prefix = "vit."
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"blocks.{i}.attn.qkv.weight" )
        in_proj_bias = state_dict.pop(f"blocks.{i}.attn.qkv.bias" )
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[
            : config.hidden_size, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.bias"] = in_proj_bias[: config.hidden_size]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.bias"] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.bias"] = in_proj_bias[-config.hidden_size :]
def remove_classification_head_( state_dict ):
    ignore_keys = ["head.weight", "head.bias"]
    for k in ignore_keys:
        state_dict.pop(k , None )
def rename_key( dct , old , new ):
    val = dct.pop(old )
    dct[new] = val
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url , stream=True ).raw )
    return im
@torch.no_grad()
def convert_vit_checkpoint( vit_name , pytorch_dump_folder_path , push_to_hub=False ):
    backbone_config = BitConfig(
        global_padding="same" , layer_type="bottleneck" , depths=(3, 4, 9) , out_features=["stage3"] , embedding_dynamic_padding=True , )
    config = ViTHybridConfig(backbone_config=backbone_config , image_size=384 , num_labels=1_000 )
    base_model = False
    # load original model from timm
    timm_model = timm.create_model(vit_name , pretrained=True )
    timm_model.eval()
    # load state_dict of original model, remove and rename some keys
    state_dict = timm_model.state_dict()
    if base_model:
        remove_classification_head_(state_dict )
    rename_keys = create_rename_keys(config , base_model )
    for src, dest in rename_keys:
        rename_key(state_dict , src , dest )
    read_in_q_k_v(state_dict , config , base_model )
    repo_id = "huggingface/label-files"
    filename = "imagenet-1k-id2label.json"
    id2label = json.load(open(hf_hub_download(repo_id , filename , repo_type="dataset" ) , "r" ) )
    id2label = {int(k ): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}
    # load HuggingFace model
    if vit_name[-5:] == "in21k":
        model = ViTHybridModel(config ).eval()
    else:
        model = ViTHybridForImageClassification(config ).eval()
    model.load_state_dict(state_dict )
    # create image processor
    transform = create_transform(**resolve_data_config({} , model=timm_model ) )
    timm_transforms = transform.transforms
    pillow_resamplings = {
        "bilinear": PILImageResampling.BILINEAR,
        "bicubic": PILImageResampling.BICUBIC,
        "nearest": PILImageResampling.NEAREST,
    }
    processor = ViTHybridImageProcessor(
        do_resize=True , size={"shortest_edge": timm_transforms[0].size} , resample=pillow_resamplings[timm_transforms[0].interpolation.value] , do_center_crop=True , crop_size={"height": timm_transforms[1].size[0], "width": timm_transforms[1].size[1]} , do_normalize=True , image_mean=timm_transforms[-1].mean.tolist() , image_std=timm_transforms[-1].std.tolist() , )
    image = prepare_img()
    timm_pixel_values = transform(image ).unsqueeze(0 )
    pixel_values = processor(image , return_tensors="pt" ).pixel_values
    # verify pixel values
    assert torch.allclose(timm_pixel_values , pixel_values )
    # verify logits
    with torch.no_grad():
        outputs = model(pixel_values )
    logits = outputs.logits
    print("Predicted class:" , logits.argmax(-1 ).item() )
    if base_model:
        timm_pooled_output = timm_model.forward_features(pixel_values )
        assert timm_pooled_output.shape == outputs.pooler_output.shape
        assert torch.allclose(timm_pooled_output , outputs.pooler_output , atol=1E-3 )
    else:
        timm_logits = timm_model(pixel_values )
        assert timm_logits.shape == outputs.logits.shape
        assert torch.allclose(timm_logits , outputs.logits , atol=1E-3 )
    print("Looks ok!" )
    if pytorch_dump_folder_path is not None:
        Path(pytorch_dump_folder_path ).mkdir(exist_ok=True )
        print(f"Saving model {vit_name} to {pytorch_dump_folder_path}" )
        model.save_pretrained(pytorch_dump_folder_path )
        print(f"Saving processor to {pytorch_dump_folder_path}" )
        processor.save_pretrained(pytorch_dump_folder_path )
    if push_to_hub:
        print(f"Pushing model and processor to the hub {vit_name}" )
        model.push_to_hub(f"ybelkada/{vit_name}" )
        processor.push_to_hub(f"ybelkada/{vit_name}" )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--vit_name''',
default='''vit_base_r50_s16_384''',
type=str,
help='''Name of the hybrid ViT timm model you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.'''
)
parser.add_argument(
'''--push_to_hub''', action='''store_true''', help='''Whether to upload the model to the HuggingFace hub.'''
)
    args = parser.parse_args()
convert_vit_checkpoint(args.vit_name, args.pytorch_dump_folder_path, args.push_to_hub)
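# Illustrative sketch (toy sizes, not the real checkpoint): read_in_q_k_v above slices
# timm's fused qkv projection into separate query/key/value tensors. This is the same
# slicing pattern, shown on random tensors.
def _demo_split_fused_qkv():
    hidden_size = 8  # toy value; the real ViT-Hybrid base uses 768
    fused_weight = torch.randn(3 * hidden_size, hidden_size)  # rows stacked as [q; k; v]
    fused_bias = torch.randn(3 * hidden_size)
    query_w = fused_weight[:hidden_size, :]
    key_w = fused_weight[hidden_size : 2 * hidden_size, :]
    value_w = fused_weight[-hidden_size:, :]
    query_b = fused_bias[:hidden_size]
    key_b = fused_bias[hidden_size : 2 * hidden_size]
    value_b = fused_bias[-hidden_size:]
    assert query_w.shape == key_w.shape == value_w.shape == (hidden_size, hidden_size)
    return (query_w, query_b), (key_w, key_b), (value_w, value_b)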
| 264 | 0 |
'''simple docstring'''
from typing import List, Union
import numpy as np
from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging, requires_backends
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_torch_available():
import torch
from ..models.auto.modeling_auto import MODEL_FOR_DEPTH_ESTIMATION_MAPPING
logger = logging.get_logger(__name__)
@add_end_docstrings(PIPELINE_INIT_ARGS )
class _UpperCAmelCase ( Pipeline ):
"""simple docstring"""
    def __init__( self , *args , **kwargs ):
        '''simple docstring'''
        super().__init__(*args , **kwargs )
        requires_backends(self , "vision" )
        self.check_model_type(MODEL_FOR_DEPTH_ESTIMATION_MAPPING )

    def __call__( self , images , **kwargs ):
        '''simple docstring'''
        return super().__call__(images , **kwargs )

    def _sanitize_parameters( self , **kwargs ):
        '''simple docstring'''
        return {}, {}, {}

    def preprocess( self , image ):
        '''simple docstring'''
        image = load_image(image )
        self.image_size = image.size
        model_inputs = self.image_processor(images=image , return_tensors=self.framework )
        return model_inputs

    def _forward( self , model_inputs ):
        '''simple docstring'''
        model_outputs = self.model(**model_inputs )
        return model_outputs

    def postprocess( self , model_outputs ):
        '''simple docstring'''
        predicted_depth = model_outputs.predicted_depth
        prediction = torch.nn.functional.interpolate(
            predicted_depth.unsqueeze(1 ) , size=self.image_size[::-1] , mode="bicubic" , align_corners=False )
        output = prediction.squeeze().cpu().numpy()
        formatted = (output * 255 / np.max(output )).astype("uint8" )
        depth = Image.fromarray(formatted )
        output_dict = {}
        output_dict["predicted_depth"] = predicted_depth
        output_dict["depth"] = depth
        return output_dict
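# Hedged usage sketch: the pipeline above is normally driven through the high-level
# `pipeline` factory; the default checkpoint it downloads is whichever model
# transformers currently ships for the task, so treat this only as an illustration.
def _demo_depth_estimation():
    from transformers import pipeline

    depth_estimator = pipeline(task="depth-estimation")
    result = depth_estimator("http://images.cocodataset.org/val2017/000000039769.jpg")
    # `result` mirrors the postprocess output above:
    # {"predicted_depth": torch.Tensor, "depth": PIL.Image.Image}
    return result["depth"]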
| 79 |
"""simple docstring"""
import argparse
import json
import os
import re
import torch
from transformers import BloomConfig, BloomModel
from transformers.file_utils import CONFIG_NAME, WEIGHTS_NAME
from transformers.utils import logging
logging.set_verbosity_info()
WEIGHTS_TO_AVERAGE_ENDSWITH = [
'''word_embeddings_layernorm.weight''',
'''word_embeddings_layernorm.bias''',
'''input_layernorm.weight''',
'''input_layernorm.bias''',
'''post_attention_layernorm.weight''',
'''post_attention_layernorm.bias''',
'''self_attention.dense.bias''',
'''mlp.dense_4h_to_h.bias''',
'''ln_f.weight''',
'''ln_f.bias''',
]
WEIGHTS_WITH_ROW_PARALLELISM_CONTAIN = [
'''mlp.dense_4h_to_h.weight''',
'''self_attention.dense.weight''',
]
def layer_name_mapping( key , file ):
    layer_rename_map = {
        "word_embeddings.weight": "word_embeddings.weight",
        "word_embeddings.norm.weight": "word_embeddings_layernorm.weight",
        "word_embeddings.norm.bias": "word_embeddings_layernorm.bias",
        "weight": "ln_f.weight",
        "bias": "ln_f.bias",
    }
    if key in layer_rename_map:
        return layer_rename_map[key]
    # Handle transformer blocks
    layer_number = int(re.match(r".*layer_(\d*).*" , file )[1] )
    layer_number -= 3
    return f"h.{layer_number}." + key
def get_dtype_size( dtype ):
    if dtype == torch.bool:
        return 1 / 8
    bit_search = re.search(r"[^\d](\d+)$" , str(dtype ) )
    if bit_search is None:
        raise ValueError(f"`dtype` is not a valid dtype: {dtype}." )
    bit_size = int(bit_search.groups()[0] )
    return bit_size // 8
def convert_bloom_checkpoint_to_pytorch( bloom_checkpoint_path , bloom_config_file , pytorch_dump_folder_path , shard_model , pretraining_tp ):
    # Construct model
    if bloom_config_file == "":
        config = BloomConfig()
    else:
        config = BloomConfig.from_json_file(bloom_config_file )
    if shard_model:
        file_names = os.listdir(bloom_checkpoint_path )
        file_names = sorted(filter(lambda s: s.startswith("layer" ) and "model_00" in s , file_names ) )
        index_dict = {"weight_map": {}, "metadata": {}}
        total_size = 0
        missing_keys = None
        config = BloomConfig()
        for j, file in enumerate(file_names ):
            print("Processing file: {}".format(file ) )
            tensors = None
            for i in range(pretraining_tp ):
                # load all TP files
                f_name = file.replace("model_00" , f"model_0{i}" )
                temp = torch.load(os.path.join(bloom_checkpoint_path , f_name ) , map_location="cpu" )
                # Rename keys in the transformers names
                keys = list(temp.keys() )
                for key in keys:
                    temp[layer_name_mapping(key , file )] = temp.pop(key )
                if tensors is None:
                    tensors = temp
                else:
                    for key in tensors.keys():
                        if any(key.endswith(end ) for end in WEIGHTS_TO_AVERAGE_ENDSWITH ):
                            # We average (sum and then divide) some weights across TP ranks (see https://github.com/bigscience-workshop/Megatron-DeepSpeed/blob/olruwase/sync_layer_norms/megatron/training.py#L425)
                            tensors[key] += temp[key]
                        else:
                            # Some weights are RowParallelLinear in Megatron-Deepspeed, others are ColumnParallel
                            cat_dim = 1 if any(text in key for text in WEIGHTS_WITH_ROW_PARALLELISM_CONTAIN ) else 0
                            # We concatenate these weights across TP ranks
                            tensors[key] = torch.cat([tensors[key], temp[key]] , dim=cat_dim )
            # Divide by the number of TP the weights we want to average
            for key in tensors.keys():
                if any(key.endswith(end ) for end in WEIGHTS_TO_AVERAGE_ENDSWITH ):
                    tensors[key] = tensors[key] / pretraining_tp
            torch.save(
                tensors , os.path.join(
                    pytorch_dump_folder_path , "pytorch_model_{}-of-{}.bin".format(str(j + 1 ).zfill(5 ) , str(len(file_names ) ).zfill(5 ) ) , ) , )
            for key in tensors.keys():
                value = tensors[key]
                total_size += value.numel() * get_dtype_size(value.dtype )
                if key not in index_dict["weight_map"]:
                    index_dict["weight_map"][key] = "pytorch_model_{}-of-{}.bin".format(
                        str(j + 1 ).zfill(5 ) , str(len(file_names ) ).zfill(5 ) )
        config = BloomConfig()
        pytorch_config_dump_path = pytorch_dump_folder_path + "/" + CONFIG_NAME
        index_dict["metadata"]["total_size"] = total_size
        with open(pytorch_config_dump_path , "w" , encoding="utf-8" ) as f:
            f.write(config.to_json_string() )
        with open(os.path.join(pytorch_dump_folder_path , WEIGHTS_NAME + ".index.json" ) , "w" , encoding="utf-8" ) as f:
            json_config = json.dumps(index_dict , indent=2 , sort_keys=True ) + "\n"
            f.write(json_config )
    else:
        model = BloomModel(config )
        file_names = os.listdir(bloom_checkpoint_path )
        file_names = sorted(filter(lambda s: s.startswith("layer" ) and "model_00" in s , file_names ) )
        missing_keys = None
        for i, file in enumerate(file_names ):
            tensors = None
            for i in range(pretraining_tp ):
                # load all TP files
                f_name = file.replace("model_00" , f"model_0{i}" )
                temp = torch.load(os.path.join(bloom_checkpoint_path , f_name ) , map_location="cpu" )
                # Rename keys in the transformers names
                keys = list(temp.keys() )
                for key in keys:
                    temp[layer_name_mapping(key , file )] = temp.pop(key )
                if tensors is None:
                    tensors = temp
                else:
                    for key in tensors.keys():
                        # We average (sum and then divide) some weights across TP ranks (see https://github.com/bigscience-workshop/Megatron-DeepSpeed/blob/olruwase/sync_layer_norms/megatron/training.py#L425)
                        if any(key.endswith(end ) for end in WEIGHTS_TO_AVERAGE_ENDSWITH ):
                            tensors[key] += temp[key]
                        else:
                            # Some weights are RowParallelLinear in Megatron-Deepspeed, others are ColumnParallel
                            cat_dim = 1 if any(text in key for text in WEIGHTS_WITH_ROW_PARALLELISM_CONTAIN ) else 0
                            # We concatenate these weights across TP ranks
                            tensors[key] = torch.cat([tensors[key], temp[key]] , dim=cat_dim )
            # Divide by the number of TP the weights we want to average
            for key in tensors.keys():
                if any(key.endswith(end ) for end in WEIGHTS_TO_AVERAGE_ENDSWITH ):
                    tensors[key] = tensors[key] / pretraining_tp
            other_keys = model.load_state_dict(tensors , strict=False )
            assert not other_keys.unexpected_keys, f"The keys {other_keys.unexpected_keys} are unexpected"
            if missing_keys is None:
                missing_keys = set(other_keys.missing_keys )
            else:
                missing_keys = missing_keys.intersection(set(other_keys.missing_keys ) )
        assert not missing_keys, f"The keys {missing_keys} are missing"
        # Save pytorch-model
        os.makedirs(pytorch_dump_folder_path , exist_ok=True )
        pytorch_weights_dump_path = pytorch_dump_folder_path + "/" + WEIGHTS_NAME
        pytorch_config_dump_path = pytorch_dump_folder_path + "/" + CONFIG_NAME
        print(f"Save PyTorch model to {pytorch_weights_dump_path} with dtype {config.torch_dtype}" )
        if config.torch_dtype is not None:
            model = model.to(config.torch_dtype )
        torch.save(model.state_dict() , pytorch_weights_dump_path )
        print(f"Save configuration file to {pytorch_config_dump_path}" )
        with open(pytorch_config_dump_path , "w" , encoding="utf-8" ) as f:
            f.write(config.to_json_string() )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--bloom_checkpoint_path''',
default=None,
type=str,
required=True,
help='''Path to the Megatron-LM checkpoint path.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
parser.add_argument(
'''--bloom_config_file''',
default='''''',
type=str,
help=(
'''An optional config json file corresponding to the pre-trained model. \n'''
'''This specifies the model architecture.'''
),
)
parser.add_argument(
'''--shard_model''',
action='''store_true''',
help='''An optional setting to shard the output model \nThis enables sharding the converted checkpoint''',
)
parser.add_argument(
'''--pretraining_tp''',
default=4,
type=int,
help='''Pretraining TP rank that has been used when training the model in Megatron-LM \n''',
)
    args = parser.parse_args()
convert_bloom_checkpoint_to_pytorch(
args.bloom_checkpoint_path,
args.bloom_config_file,
args.pytorch_dump_folder_path,
args.shard_model,
args.pretraining_tp,
)
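# Toy illustration of the merge rule used above (made-up shapes, not BLOOM's): weights
# whose names end in WEIGHTS_TO_AVERAGE_ENDSWITH are averaged across tensor-parallel
# ranks, while parallel linear weights are concatenated (dim 1 for row-parallel layers,
# dim 0 for column-parallel ones).
def _demo_tp_merge():
    rank_biases = [torch.ones(4), torch.ones(4) * 3]      # replicated -> average
    merged_bias = sum(rank_biases) / len(rank_biases)     # tensor([2., 2., 2., 2.])
    rank_weights = [torch.zeros(4, 2), torch.ones(4, 2)]  # column-parallel -> concat on dim 0
    merged_weight = torch.cat(rank_weights, dim=0)        # shape (8, 2)
    return merged_bias, merged_weight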
| 264 | 0 |
'''simple docstring'''
from typing import Any, Dict, List, Union
from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging, requires_backends
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from ..image_utils import load_image
if is_torch_available():
import torch
from ..models.auto.modeling_auto import MODEL_FOR_OBJECT_DETECTION_MAPPING, MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING
logger = logging.get_logger(__name__)
Prediction = Dict[str, Any]
Predictions = List[Prediction]
@add_end_docstrings(PIPELINE_INIT_ARGS )
class lowercase_ ( Pipeline ):
    def __init__( self , *args , **kwargs ):
        super().__init__(*args , **kwargs )
        if self.framework == "tf":
            raise ValueError(f"The {self.__class__} is only available in PyTorch." )
        requires_backends(self , "vision" )
        self.check_model_type(
            dict(MODEL_FOR_OBJECT_DETECTION_MAPPING.items() + MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING.items() ) )

    def _sanitize_parameters( self , **kwargs ):
        postprocess_kwargs = {}
        if "threshold" in kwargs:
            postprocess_kwargs["threshold"] = kwargs["threshold"]
        return {}, {}, postprocess_kwargs

    def __call__( self , *args , **kwargs ):
        return super().__call__(*args , **kwargs )

    def preprocess( self , image ):
        image = load_image(image )
        target_size = torch.IntTensor([[image.height, image.width]] )
        inputs = self.image_processor(images=[image] , return_tensors="pt" )
        if self.tokenizer is not None:
            inputs = self.tokenizer(text=inputs["words"] , boxes=inputs["boxes"] , return_tensors="pt" )
        inputs["target_size"] = target_size
        return inputs

    def _forward( self , model_inputs ):
        target_size = model_inputs.pop("target_size" )
        outputs = self.model(**model_inputs )
        model_outputs = outputs.__class__({"target_size": target_size, **outputs} )
        if self.tokenizer is not None:
            model_outputs["bbox"] = model_inputs["bbox"]
        return model_outputs

    def postprocess( self , model_outputs , threshold=0.9 ):
        target_size = model_outputs["target_size"]
        if self.tokenizer is not None:
            # This is a LayoutLMForTokenClassification variant.
            # The OCR got the boxes and the model classified the words.
            height , width = target_size[0].tolist()

            def unnormalize(bbox ):
                return self._get_bounding_box(
                    torch.Tensor(
                        [
                            (width * bbox[0] / 1000),
                            (height * bbox[1] / 1000),
                            (width * bbox[2] / 1000),
                            (height * bbox[3] / 1000),
                        ] ) )

            scores , classes = model_outputs["logits"].squeeze(0 ).softmax(dim=-1 ).max(dim=-1 )
            labels = [self.model.config.id2label[prediction] for prediction in classes.tolist()]
            boxes = [unnormalize(bbox ) for bbox in model_outputs["bbox"].squeeze(0 )]
            keys = ["score", "label", "box"]
            annotation = [dict(zip(keys , vals ) ) for vals in zip(scores.tolist() , labels , boxes ) if vals[0] > threshold]
        else:
            # This is a regular ForObjectDetectionModel
            raw_annotations = self.image_processor.post_process_object_detection(model_outputs , threshold , target_size )
            raw_annotation = raw_annotations[0]
            scores = raw_annotation["scores"]
            labels = raw_annotation["labels"]
            boxes = raw_annotation["boxes"]
            raw_annotation["scores"] = scores.tolist()
            raw_annotation["labels"] = [self.model.config.id2label[label.item()] for label in labels]
            raw_annotation["boxes"] = [self._get_bounding_box(box ) for box in boxes]
            # {"scores": [...], ...} --> [{"score":x, ...}, ...]
            keys = ["score", "label", "box"]
            annotation = [
                dict(zip(keys , vals ) )
                for vals in zip(raw_annotation["scores"] , raw_annotation["labels"] , raw_annotation["boxes"] )
            ]
        return annotation

    def _get_bounding_box( self , box ):
        if self.framework != "pt":
            raise ValueError("The ObjectDetectionPipeline is only available in PyTorch." )
        xmin , ymin , xmax , ymax = box.int().tolist()
        bbox = {
            "xmin": xmin,
            "ymin": ymin,
            "xmax": xmax,
            "ymax": ymax,
        }
        return bbox
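# Hedged usage sketch: each prediction returned by the factory-built pipeline carries
# the "score"/"label"/"box" keys assembled in postprocess above. The default checkpoint
# is whatever transformers currently ships for the task.
def _demo_object_detection():
    from transformers import pipeline

    detector = pipeline(task="object-detection")
    predictions = detector("http://images.cocodataset.org/val2017/000000039769.jpg", threshold=0.9)
    # e.g. [{"score": 0.99, "label": "cat", "box": {"xmin": ..., "ymin": ..., "xmax": ..., "ymax": ...}}, ...]
    return predictions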
| 80 |
"""simple docstring"""
def jaccard_similarity( set_a , set_b , alternative_union=False ):
    if isinstance(set_a , set ) and isinstance(set_b , set ):
        intersection = len(set_a.intersection(set_b ) )
        if alternative_union:
            union = len(set_a ) + len(set_b )
        else:
            union = len(set_a.union(set_b ) )
        return intersection / union
    if isinstance(set_a , (list, tuple) ) and isinstance(set_b , (list, tuple) ):
        intersection = [element for element in set_a if element in set_b]
        if alternative_union:
            union = len(set_a ) + len(set_b )
            return len(intersection ) / union
        else:
            union = set_a + [element for element in set_b if element not in set_a]
            return len(intersection ) / len(union )
    return None
if __name__ == "__main__":
    set_a = {"a", "b", "c", "d", "e"}
    set_b = {"c", "d", "e", "f", "h", "i"}
print(jaccard_similarity(set_a, set_b))
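# Worked example: for the sets above, |A ∩ B| = |{c, d, e}| = 3 and |A ∪ B| = 8, so the
# script prints 3 / 8 = 0.375. With alternative_union=True the denominator becomes
# |A| + |B| = 11 instead:
#
#     jaccard_similarity({"a", "b", "c", "d", "e"}, {"c", "d", "e", "f", "h", "i"},
#                        alternative_union=True)  # -> 3 / 11 ~= 0.2727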
| 264 | 0 |
"""simple docstring"""
import argparse
import os
import re
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_dummies.py
PATH_TO_DIFFUSERS = "src/diffusers"
# Matches is_xxx_available()
_re_backend = re.compile(r"is\_([a-z_]*)_available\(\)")
# Matches from xxx import bla
_re_single_line_import = re.compile(r"\s+from\s+\S*\s+import\s+([^\(\s].*)\n")
DUMMY_CONSTANT = """
{0} = None
"""
DUMMY_CLASS = """
class {0}(metaclass=DummyObject):
    _backends = {1}

    def __init__(self, *args, **kwargs):
        requires_backends(self, {1})

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, {1})

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, {1})
"""
DUMMY_FUNCTION = """
def {0}(*args, **kwargs):
    requires_backends({0}, {1})
"""
def find_backend( line ):
    """simple docstring"""
    backends = _re_backend.findall(line )
    if len(backends ) == 0:
        return None
    return "_and_".join(backends )
def read_init():
    """simple docstring"""
    with open(os.path.join(PATH_TO_DIFFUSERS , "__init__.py" ) , "r" , encoding="utf-8" , newline="\n" ) as f:
        lines = f.readlines()
    # Get to the point we do the actual imports for type checking
    line_index = 0
    backend_specific_objects = {}
    # Go through the end of the file
    while line_index < len(lines ):
        # If the line contains is_backend_available, we grab all objects associated with the `else` block
        backend = find_backend(lines[line_index] )
        if backend is not None:
            while not lines[line_index].startswith("else:" ):
                line_index += 1
            line_index += 1
            objects = []
            # Until we unindent, add backend objects to the list
            while line_index < len(lines ) and len(lines[line_index] ) > 1:
                line = lines[line_index]
                single_line_import_search = _re_single_line_import.search(line )
                if single_line_import_search is not None:
                    objects.extend(single_line_import_search.groups()[0].split(", " ) )
                elif line.startswith(" " * 8 ):
                    objects.append(line[8:-2] )
                line_index += 1
            if len(objects ) > 0:
                backend_specific_objects[backend] = objects
        else:
            line_index += 1
    return backend_specific_objects
def create_dummy_object( name , backend_name ):
    """simple docstring"""
    if name.isupper():
        return DUMMY_CONSTANT.format(name )
    elif name.islower():
        return DUMMY_FUNCTION.format(name , backend_name )
    else:
        return DUMMY_CLASS.format(name , backend_name )
def create_dummy_files( backend_specific_objects=None ):
    """simple docstring"""
    if backend_specific_objects is None:
        backend_specific_objects = read_init()
    # For special correspondence backend to module name as used in the function requires_modulename
    dummy_files = {}
    for backend, objects in backend_specific_objects.items():
        backend_name = "[" + ", ".join(f'"{b}"' for b in backend.split("_and_" ) ) + "]"
        dummy_file = "# This file is autogenerated by the command `make fix-copies`, do not edit.\n"
        dummy_file += "from ..utils import DummyObject, requires_backends\n\n"
        dummy_file += "\n".join([create_dummy_object(o , backend_name ) for o in objects] )
        dummy_files[backend] = dummy_file
    return dummy_files
def check_dummies( overwrite=False ):
    """simple docstring"""
    dummy_files = create_dummy_files()
    # For special correspondence backend to shortcut as used in utils/dummy_xxx_objects.py
    short_names = {"torch": "pt"}
    # Locate actual dummy modules and read their content.
    path = os.path.join(PATH_TO_DIFFUSERS , "utils" )
    dummy_file_paths = {
        backend: os.path.join(path , f"dummy_{short_names.get(backend , backend )}_objects.py" )
        for backend in dummy_files.keys()
    }
    actual_dummies = {}
    for backend, file_path in dummy_file_paths.items():
        if os.path.isfile(file_path ):
            with open(file_path , "r" , encoding="utf-8" , newline="\n" ) as f:
                actual_dummies[backend] = f.read()
        else:
            actual_dummies[backend] = ""
    for backend in dummy_files.keys():
        if dummy_files[backend] != actual_dummies[backend]:
            if overwrite:
                print(
                    f"Updating diffusers.utils.dummy_{short_names.get(backend , backend )}_objects.py as the main "
                    "__init__ has new objects." )
                with open(dummy_file_paths[backend] , "w" , encoding="utf-8" , newline="\n" ) as f:
                    f.write(dummy_files[backend] )
            else:
                raise ValueError(
                    "The main __init__ has objects that are not present in "
                    f"diffusers.utils.dummy_{short_names.get(backend , backend )}_objects.py. Run `make fix-copies` "
                    "to fix this." )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument("""--fix_and_overwrite""", action="""store_true""", help="""Whether to fix inconsistencies.""")
    args = parser.parse_args()
check_dummies(args.fix_and_overwrite) | 81 |
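# Hedged illustration of the output: given the templates above, a "torch"-backed class
# named, say, UNet2DModel would render roughly to
#
#     class UNet2DModel(metaclass=DummyObject):
#         _backends = ["torch"]
#
#         def __init__(self, *args, **kwargs):
#             requires_backends(self, ["torch"])
#
# so importing it without torch installed only fails when the object is actually used.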
"""simple docstring"""
import os
from datetime import datetime as dt
from github import Github
LABELS_TO_EXEMPT = [
'''good first issue''',
'''good second issue''',
'''good difficult issue''',
'''enhancement''',
'''new pipeline/model''',
'''new scheduler''',
'''wip''',
]
def main():
    g = Github(os.environ["GITHUB_TOKEN"] )
    repo = g.get_repo("huggingface/diffusers" )
    open_issues = repo.get_issues(state="open" )
    for issue in open_issues:
        comments = sorted(issue.get_comments() , key=lambda i: i.created_at , reverse=True )
        last_comment = comments[0] if len(comments ) > 0 else None
if (
last_comment is not None
and last_comment.user.login == "github-actions[bot]"
and (dt.utcnow() - issue.updated_at).days > 7
and (dt.utcnow() - issue.created_at).days >= 30
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
):
# Closes the issue after 7 days of inactivity since the Stalebot notification.
issue.edit(state='''closed''' )
elif (
"stale" in issue.get_labels()
and last_comment is not None
and last_comment.user.login != "github-actions[bot]"
):
# Opens the issue if someone other than Stalebot commented.
issue.edit(state='''open''' )
issue.remove_from_labels('''stale''' )
elif (
(dt.utcnow() - issue.updated_at).days > 23
and (dt.utcnow() - issue.created_at).days >= 30
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
):
# Post a Stalebot notification after 23 days of inactivity.
issue.create_comment(
'''This issue has been automatically marked as stale because it has not had '''
'''recent activity. If you think this still needs to be addressed '''
'''please comment on this thread.\n\nPlease note that issues that do not follow the '''
'''[contributing guidelines](https://github.com/huggingface/diffusers/blob/main/CONTRIBUTING.md) '''
'''are likely to be ignored.''' )
issue.add_to_labels('''stale''' )
if __name__ == "__main__":
main()
| 264 | 0 |
import os
import time
import warnings
from dataclasses import dataclass, field
from enum import Enum
from typing import List, Optional, Union
import torch
from filelock import FileLock
from torch.utils.data import Dataset
from ...tokenization_utils_base import PreTrainedTokenizerBase
from ...utils import logging
from ..processors.glue import glue_convert_examples_to_features, glue_output_modes, glue_processors
from ..processors.utils import InputFeatures
logger = logging.get_logger(__name__)
@dataclass
class GlueDataTrainingArguments:
    task_name: str = field(metadata={"help": "The name of the task to train on: " + ", ".join(glue_processors.keys() )} )
    data_dir: str = field(
        metadata={"help": "The input data dir. Should contain the .tsv files (or other data files) for the task."} )
    max_seq_length: int = field(
        default=128 , metadata={
            "help": (
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        } , )
    overwrite_cache: bool = field(
        default=False , metadata={"help": "Overwrite the cached training and evaluation sets"} )

    def __post_init__( self ):
        """simple docstring"""
        self.task_name = self.task_name.lower()

class Split( Enum ):
    train = "train"
    dev = "dev"
    test = "test"

class GlueDataset( Dataset ):
    args: GlueDataTrainingArguments
    output_mode: str
    features: List[InputFeatures]
    def __init__( self , args , tokenizer , limit_length = None , mode = Split.train , cache_dir = None , ):
        """simple docstring"""
        warnings.warn(
            """This dataset will be removed from the library soon, preprocessing should be handled with the 🤗 Datasets """
            """library. You can have a look at this example script for pointers: """
            """https://github.com/huggingface/transformers/blob/main/examples/pytorch/text-classification/run_glue.py""" , FutureWarning , )
        self.args = args
        self.processor = glue_processors[args.task_name]()
        self.output_mode = glue_output_modes[args.task_name]
        if isinstance(mode , str ):
            try:
                mode = Split[mode]
            except KeyError:
                raise KeyError("""mode is not a valid split name""" )
        # Load data features from cache or dataset file
        cached_features_file = os.path.join(
            cache_dir if cache_dir is not None else args.data_dir , F'cached_{mode.value}_{tokenizer.__class__.__name__}_{args.max_seq_length}_{args.task_name}' , )
        label_list = self.processor.get_labels()
        if args.task_name in ["mnli", "mnli-mm"] and tokenizer.__class__.__name__ in (
            "RobertaTokenizer",
            "RobertaTokenizerFast",
            "XLMRobertaTokenizer",
            "BartTokenizer",
            "BartTokenizerFast",
        ):
            # HACK(label indices are swapped in RoBERTa pretrained model)
            label_list[1] , label_list[2] = label_list[2], label_list[1]
        self.label_list = label_list
        # Make sure only the first process in distributed training processes the dataset,
        # and the others will use the cache.
        lock_path = cached_features_file + """.lock"""
        with FileLock(lock_path ):
            if os.path.exists(cached_features_file ) and not args.overwrite_cache:
                start = time.time()
                self.features = torch.load(cached_features_file )
                logger.info(
                    F'Loading features from cached file {cached_features_file} [took %.3f s]' , time.time() - start )
            else:
                logger.info(F'Creating features from dataset file at {args.data_dir}' )
                if mode == Split.dev:
                    examples = self.processor.get_dev_examples(args.data_dir )
                elif mode == Split.test:
                    examples = self.processor.get_test_examples(args.data_dir )
                else:
                    examples = self.processor.get_train_examples(args.data_dir )
                if limit_length is not None:
                    examples = examples[:limit_length]
                self.features = glue_convert_examples_to_features(
                    examples , tokenizer , max_length=args.max_seq_length , label_list=label_list , output_mode=self.output_mode , )
                start = time.time()
                torch.save(self.features , cached_features_file )
                # ^ This seems to take a lot of time so I want to investigate why and how we can improve.
                logger.info(
                    F'Saving features into cached file {cached_features_file} [took {time.time() - start:.3f} s]' )
def __len__( self ):
"""simple docstring"""
return len(self.features )
    def __getitem__( self , i ):
        """simple docstring"""
        return self.features[i]
    def get_labels( self ):
"""simple docstring"""
return self.label_list
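# Hedged usage sketch (this dataset class is deprecated upstream; the data_dir below is
# a hypothetical local path to GLUE task files):
def _demo_glue_dataset():
    from transformers import AutoTokenizer

    data_args = GlueDataTrainingArguments(task_name="mrpc", data_dir="./glue_data/MRPC")
    tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
    dataset = GlueDataset(data_args, tokenizer=tokenizer, mode=Split.dev)
    return len(dataset), dataset.get_labels()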
| 82 |
"""simple docstring"""
import argparse
import json
import numpy
import torch
from transformers.models.xlm.tokenization_xlm import VOCAB_FILES_NAMES
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging
logging.set_verbosity_info()
def convert_xlm_checkpoint_to_pytorch( xlm_checkpoint_path , pytorch_dump_folder_path ):
    # Load checkpoint
    chkpt = torch.load(xlm_checkpoint_path , map_location="cpu" )
    state_dict = chkpt["model"]
    # We have the base model one level deeper than the original XLM repository
    two_levels_state_dict = {}
    for k, v in state_dict.items():
        if "pred_layer" in k:
            two_levels_state_dict[k] = v
        else:
            two_levels_state_dict["transformer." + k] = v
    config = chkpt["params"]
    config = {n: v for n, v in config.items() if not isinstance(v , (torch.FloatTensor, numpy.ndarray) )}
    vocab = chkpt["dico_word2id"]
    vocab = {s + "</w>" if s.find("@@" ) == -1 and i > 13 else s.replace("@@" , "" ): i for s, i in vocab.items()}
    # Save pytorch-model
    pytorch_weights_dump_path = pytorch_dump_folder_path + "/" + WEIGHTS_NAME
    pytorch_config_dump_path = pytorch_dump_folder_path + "/" + CONFIG_NAME
    pytorch_vocab_dump_path = pytorch_dump_folder_path + "/" + VOCAB_FILES_NAMES["vocab_file"]
    print(f"Save PyTorch model to {pytorch_weights_dump_path}" )
    torch.save(two_levels_state_dict , pytorch_weights_dump_path )
    print(f"Save configuration file to {pytorch_config_dump_path}" )
    with open(pytorch_config_dump_path , "w" , encoding="utf-8" ) as f:
        f.write(json.dumps(config , indent=2 ) + "\n" )
    print(f"Save vocab file to {pytorch_vocab_dump_path}" )
    with open(pytorch_vocab_dump_path , "w" , encoding="utf-8" ) as f:
        f.write(json.dumps(vocab , indent=2 ) + "\n" )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--xlm_checkpoint_path''', default=None, type=str, required=True, help='''Path the official PyTorch dump.'''
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
    args = parser.parse_args()
convert_xlm_checkpoint_to_pytorch(args.xlm_checkpoint_path, args.pytorch_dump_folder_path)
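# Note on the vocab rewrite above (fastBPE conventions, stated with some hedging):
# tokens carrying the "@@" continuation marker have it stripped ("low@@" -> "low"),
# while word-final tokens get an explicit end-of-word marker ("low" -> "low</w>").
# The `i > 13` guard appears to leave XLM's first 14 special tokens untouched.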
| 264 | 0 |
'''simple docstring'''
import inspect
import math
import tempfile
import unittest
import numpy as np
from transformers import ViTMAEConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ViTMAEForPreTraining, ViTMAEModel
from transformers.models.vit.modeling_vit import VIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class ViTMAEModelTester:
    def __init__( self , parent , batch_size=13 , image_size=30 , patch_size=2 , num_channels=3 , is_training=True , use_labels=True , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=37 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , type_sequence_label_size=10 , initializer_range=0.02 , num_labels=3 , mask_ratio=0.6 , scope=None , ):
        '''simple docstring'''
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.mask_ratio = mask_ratio
        self.scope = scope
        # in ViTMAE, the expected sequence length = (num_patches + 1) * (1 - config.mask_ratio), rounded above
        # (we add 1 for the [CLS] token)
        self.num_patches = (image_size // patch_size) ** 2
        self.seq_length = int(math.ceil((1 - mask_ratio) * (self.num_patches + 1) ) )
def UpperCamelCase_ ( self : List[str] ):
'''simple docstring'''
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size] , self.type_sequence_label_size )
        config = self.get_config()
        return config, pixel_values, labels
def UpperCamelCase_ ( self : int ):
'''simple docstring'''
return ViTMAEConfig(
            image_size=self.image_size ,patch_size=self.patch_size ,num_channels=self.num_channels ,hidden_size=self.hidden_size ,num_hidden_layers=self.num_hidden_layers ,num_attention_heads=self.num_attention_heads ,intermediate_size=self.intermediate_size ,hidden_act=self.hidden_act ,hidden_dropout_prob=self.hidden_dropout_prob ,attention_probs_dropout_prob=self.attention_probs_dropout_prob ,is_decoder=False ,initializer_range=self.initializer_range ,mask_ratio=self.mask_ratio ,)
    def UpperCamelCase_ ( self , config , pixel_values , labels ):
        '''simple docstring'''
        model = ViTMAEModel(config=config )
        model.to(torch_device )
        model.eval()
        result = model(pixel_values )
        self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.seq_length, self.hidden_size) )
    def UpperCamelCase_ ( self , config , pixel_values , labels ):
        '''simple docstring'''
        model = ViTMAEForPreTraining(config )
        model.to(torch_device )
        model.eval()
        result = model(pixel_values )
        num_patches = (self.image_size // self.patch_size) ** 2
        expected_num_channels = self.patch_size**2 * self.num_channels
        self.parent.assertEqual(result.logits.shape ,(self.batch_size, num_patches, expected_num_channels) )
        # test greyscale images
        config.num_channels = 1
        model = ViTMAEForPreTraining(config )
        model.to(torch_device )
        model.eval()
        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
        result = model(pixel_values )
        expected_num_channels = self.patch_size**2
        self.parent.assertEqual(result.logits.shape ,(self.batch_size, num_patches, expected_num_channels) )
def UpperCamelCase_ ( self : Dict ):
'''simple docstring'''
        config_and_inputs = self.prepare_config_and_inputs()
        config , pixel_values , labels = config_and_inputs
        inputs_dict = {'pixel_values': pixel_values}
        return config, inputs_dict
@require_torch
class ViTMAEModelTest( ModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    all_model_classes = (ViTMAEModel, ViTMAEForPreTraining) if is_torch_available() else ()
    pipeline_model_mapping = {"feature-extraction": ViTMAEModel} if is_torch_available() else {}
    test_pruning = False
    test_torchscript = False
    test_resize_embeddings = False
    test_head_masking = False
def UpperCamelCase_ ( self : Union[str, Any] ):
'''simple docstring'''
        self.model_tester = ViTMAEModelTester(self )
        self.config_tester = ConfigTester(self ,config_class=ViTMAEConfig ,has_text_modality=False ,hidden_size=37 )
def UpperCamelCase_ ( self : List[Any] ):
'''simple docstring'''
self.config_tester.run_common_tests()
@unittest.skip(reason='ViTMAE does not use inputs_embeds' )
def UpperCamelCase_ ( self : str ):
'''simple docstring'''
pass
def UpperCamelCase_ ( self : Dict ):
'''simple docstring'''
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config )
            self.assertIsInstance(model.get_input_embeddings() ,(nn.Module) )
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x ,nn.Linear ) )
def UpperCamelCase_ ( self : Dict ):
'''simple docstring'''
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config )
            signature = inspect.signature(model.forward )
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ['pixel_values']
            self.assertListEqual(arg_names[:1] ,expected_arg_names )
def UpperCamelCase_ ( self : str ):
'''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs )
def UpperCamelCase_ ( self : List[Any] ):
'''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_pretraining(*config_and_inputs )
    def check_pt_tf_models( self , tf_model , pt_model , pt_inputs_dict ):
        '''simple docstring'''
        # make masks reproducible
        np.random.seed(2 )
        num_patches = int((pt_model.config.image_size // pt_model.config.patch_size) ** 2 )
        noise = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
        pt_noise = torch.from_numpy(noise )
        # Add `noise` argument.
        # PT inputs will be prepared in `super().check_pt_tf_models()` with this added `noise` argument
        pt_inputs_dict["noise"] = pt_noise
        super().check_pt_tf_models(tf_model ,pt_model ,pt_inputs_dict )
def UpperCamelCase_ ( self : Tuple ):
'''simple docstring'''
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config )
            model.to(torch_device )
            model.eval()
            # make random mask reproducible
            torch.manual_seed(2 )
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict ,model_class ) )
            out_2 = outputs[0].cpu().numpy()
            out_2[np.isnan(out_2 )] = 0
            with tempfile.TemporaryDirectory() as tmpdirname:
                model.save_pretrained(tmpdirname )
                model = model_class.from_pretrained(tmpdirname )
                model.to(torch_device )
                # make random mask reproducible
                torch.manual_seed(2 )
                with torch.no_grad():
                    after_outputs = model(**self._prepare_for_class(inputs_dict ,model_class ) )
                # Make sure we don't have nans
                out_1 = after_outputs[0].cpu().numpy()
                out_1[np.isnan(out_1 )] = 0
                max_diff = np.amax(np.abs(out_1 - out_2 ) )
                self.assertLessEqual(max_diff ,1E-5 )
@unittest.skip(
reason='ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load\n to get deterministic results.' )
def UpperCamelCase_ ( self : List[Any] ):
'''simple docstring'''
pass
@unittest.skip(
reason='ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load\n to get deterministic results.' )
def UpperCamelCase_ ( self : Optional[int] ):
'''simple docstring'''
pass
@unittest.skip(
reason='ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load\n to get deterministic results.' )
def UpperCamelCase_ ( self : Union[str, Any] ):
'''simple docstring'''
pass
@unittest.skip(reason='ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load' )
def UpperCamelCase_ ( self : List[Any] ):
'''simple docstring'''
pass
@unittest.skip('Will be fixed soon by reducing the size of the model used for common tests.' )
def UpperCamelCase_ ( self : Tuple ):
'''simple docstring'''
pass
@slow
def UpperCamelCase_ ( self : str ):
'''simple docstring'''
for model_name in VIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = ViTMAEModel.from_pretrained(model_name )
            self.assertIsNotNone(model )
def prepare_img():
    image = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
    return image
@require_torch
@require_vision
class ViTMAEModelIntegrationTest( unittest.TestCase ):
@cached_property
def UpperCamelCase_ ( self : Optional[int] ):
'''simple docstring'''
return ViTImageProcessor.from_pretrained('facebook/vit-mae-base' ) if is_vision_available() else None
@slow
def UpperCamelCase_ ( self : List[Any] ):
'''simple docstring'''
        # make random mask reproducible across the PT and TF model
        np.random.seed(2 )
        model = ViTMAEForPreTraining.from_pretrained('facebook/vit-mae-base' ).to(torch_device )
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image ,return_tensors='pt' ).to(torch_device )
        # prepare a noise vector that will be also used for testing the TF model
        # (this way we can ensure that the PT and TF models operate on the same inputs)
        vit_mae_config = ViTMAEConfig()
        num_patches = int((vit_mae_config.image_size // vit_mae_config.patch_size) ** 2 )
        noise = np.random.uniform(size=(1, num_patches) )
        # forward pass
        with torch.no_grad():
            outputs = model(**inputs ,noise=torch.from_numpy(noise ).to(device=torch_device ) )
        # verify the logits
        expected_shape = torch.Size((1, 196, 768) )
        self.assertEqual(outputs.logits.shape ,expected_shape )
        expected_slice = torch.tensor(
            [[-0.0548, -1.7023, -0.9325], [0.3721, -0.5670, -0.2233], [0.8235, -1.3878, -0.3524]] )
        self.assertTrue(torch.allclose(outputs.logits[0, :3, :3] ,expected_slice.to(torch_device ) ,atol=1E-4 ) )
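# Worked example of the sequence-length rule used by the tester above: with
# image_size=30 and patch_size=2 there are (30 // 2) ** 2 = 225 patches, and with
# mask_ratio=0.6 the visible length is int(math.ceil((1 - 0.6) * (225 + 1))) = 91
# tokens (the +1 accounts for the [CLS] token).
def _demo_vitmae_seq_length(image_size=30, patch_size=2, mask_ratio=0.6):
    num_patches = (image_size // patch_size) ** 2
    return int(math.ceil((1 - mask_ratio) * (num_patches + 1)))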
| 83 |
"""simple docstring"""
from . import __version__
# Backward compatibility imports, to make sure all those objects can be found in file_utils
from .utils import (
CLOUDFRONT_DISTRIB_PREFIX,
CONFIG_NAME,
DISABLE_TELEMETRY,
DUMMY_INPUTS,
DUMMY_MASK,
ENV_VARS_TRUE_AND_AUTO_VALUES,
ENV_VARS_TRUE_VALUES,
FEATURE_EXTRACTOR_NAME,
FLAX_WEIGHTS_NAME,
HF_MODULES_CACHE,
HUGGINGFACE_CO_PREFIX,
HUGGINGFACE_CO_RESOLVE_ENDPOINT,
MODEL_CARD_NAME,
MULTIPLE_CHOICE_DUMMY_INPUTS,
PYTORCH_PRETRAINED_BERT_CACHE,
PYTORCH_TRANSFORMERS_CACHE,
S3_BUCKET_PREFIX,
SENTENCEPIECE_UNDERLINE,
SPIECE_UNDERLINE,
TF2_WEIGHTS_NAME,
TF_WEIGHTS_NAME,
TORCH_FX_REQUIRED_VERSION,
TRANSFORMERS_CACHE,
TRANSFORMERS_DYNAMIC_MODULE_NAME,
USE_JAX,
USE_TF,
USE_TORCH,
WEIGHTS_INDEX_NAME,
WEIGHTS_NAME,
ContextManagers,
DummyObject,
EntryNotFoundError,
ExplicitEnum,
ModelOutput,
PaddingStrategy,
PushToHubMixin,
RepositoryNotFoundError,
RevisionNotFoundError,
TensorType,
_LazyModule,
add_code_sample_docstrings,
add_end_docstrings,
add_start_docstrings,
add_start_docstrings_to_model_forward,
cached_property,
copy_func,
default_cache_path,
define_sagemaker_information,
get_cached_models,
get_file_from_repo,
get_full_repo_name,
get_torch_version,
has_file,
http_user_agent,
is_apex_available,
    is_bs4_available,
is_coloredlogs_available,
is_datasets_available,
    is_detectron2_available,
is_faiss_available,
is_flax_available,
is_ftfy_available,
is_in_notebook,
is_ipex_available,
is_librosa_available,
is_offline_mode,
is_onnx_available,
is_pandas_available,
is_phonemizer_available,
is_protobuf_available,
is_psutil_available,
    is_py3nvml_available,
is_pyctcdecode_available,
is_pytesseract_available,
is_pytorch_quantization_available,
is_rjieba_available,
is_sagemaker_dp_enabled,
is_sagemaker_mp_enabled,
is_scipy_available,
is_sentencepiece_available,
is_seqio_available,
is_sklearn_available,
is_soundfile_availble,
is_spacy_available,
is_speech_available,
is_tensor,
is_tensorflow_probability_available,
    is_tf2onnx_available,
is_tf_available,
is_timm_available,
is_tokenizers_available,
is_torch_available,
    is_torch_bf16_available,
is_torch_cuda_available,
is_torch_fx_available,
is_torch_fx_proxy,
is_torch_mps_available,
    is_torch_tf32_available,
is_torch_tpu_available,
is_torchaudio_available,
is_training_run_on_sagemaker,
is_vision_available,
replace_return_docstrings,
requires_backends,
to_numpy,
to_py_obj,
torch_only_method,
)
| 264 | 0 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
'configuration_timesformer': ['TIMESFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP', 'TimesformerConfig'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_timesformer"] = [
'TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
'TimesformerModel',
'TimesformerForVideoClassification',
'TimesformerPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_timesformer import TIMESFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, TimesformerConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_timesformer import (
TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TimesformerForVideoClassification,
TimesformerModel,
TimesformerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
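# Simplified sketch of the lazy-import pattern used above (illustrative only; the real
# _LazyModule in transformers handles more cases):
#
#     import importlib, sys, types
#
#     class LazyModule(types.ModuleType):
#         def __init__(self, name, import_structure):
#             super().__init__(name)
#             self._submodule_by_attr = {
#                 attr: mod for mod, attrs in import_structure.items() for attr in attrs
#             }
#
#         def __getattr__(self, attr):
#             module = importlib.import_module("." + self._submodule_by_attr[attr], self.__name__)
#             return getattr(module, attr)
#
# Replacing sys.modules[__name__] with such an object defers heavy submodule imports
# until an exported attribute is first accessed.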
| 84 |
"""simple docstring"""
import os
import tempfile
import unittest
import uuid
from pathlib import Path
from transformers.testing_utils import get_tests_dir, require_soundfile, require_torch, require_vision
from transformers.tools.agent_types import AgentAudio, AgentImage, AgentText
from transformers.utils import is_soundfile_availble, is_torch_available, is_vision_available
if is_torch_available():
import torch
if is_soundfile_availble():
import soundfile as sf
if is_vision_available():
from PIL import Image
def __lowercase ( _a="" ):
snake_case_ : List[str] = tempfile.mkdtemp()
return os.path.join(_a , str(uuid.uuida() ) + suffix )
@require_soundfile
@require_torch
class _UpperCAmelCase ( unittest.TestCase):
def _snake_case ( self : str ):
        tensor = torch.rand(12 , dtype=torch.float64 ) - 0.5
        agent_type = AgentAudio(tensor )
        path = str(agent_type.to_string() )
        # Ensure that the tensor and the agent_type's tensor are the same
        self.assertTrue(torch.allclose(tensor , agent_type.to_raw() , atol=1E-4 ) )
        del agent_type
        # Ensure the path remains even after the object deletion
        self.assertTrue(os.path.exists(path ) )
        # Ensure that the file contains the same value as the original tensor
        new_tensor, _ = sf.read(path )
        self.assertTrue(torch.allclose(tensor , torch.tensor(new_tensor ) , atol=1E-4 ) )
def _snake_case ( self : Optional[int] ):
        tensor = torch.rand(12 , dtype=torch.float64 ) - 0.5
        path = get_new_path(suffix=".wav" )
        sf.write(path , tensor , 16000 )
        agent_type = AgentAudio(path )
        self.assertTrue(torch.allclose(tensor , agent_type.to_raw() , atol=1E-4 ) )
        self.assertEqual(agent_type.to_string() , path )
@require_vision
@require_torch
class _UpperCAmelCase ( unittest.TestCase):
def _snake_case ( self : Tuple ):
        tensor = torch.randint(0 , 256 , (64, 64, 3) )
        agent_type = AgentImage(tensor )
        path = str(agent_type.to_string() )
        # Ensure that the tensor and the agent_type's tensor are the same
        self.assertTrue(torch.allclose(tensor , agent_type._tensor , atol=1E-4 ) )
        self.assertIsInstance(agent_type.to_raw() , Image.Image )
        # Ensure the path remains even after the object deletion
        del agent_type
        self.assertTrue(os.path.exists(path ) )
def _snake_case ( self : str ):
        path = Path(get_tests_dir("fixtures/tests_samples/COCO" ) ) / "000000039769.png"
        image = Image.open(path )
        agent_type = AgentImage(path )
        self.assertTrue(path.samefile(agent_type.to_string() ) )
        self.assertTrue(image == agent_type.to_raw() )
        # Ensure the path remains even after the object deletion
        del agent_type
        self.assertTrue(os.path.exists(path ) )
def _snake_case ( self : str ):
        path = Path(get_tests_dir("fixtures/tests_samples/COCO" ) ) / "000000039769.png"
        image = Image.open(path )
        agent_type = AgentImage(image )
        self.assertFalse(path.samefile(agent_type.to_string() ) )
        self.assertTrue(image == agent_type.to_raw() )
        # Ensure the path remains even after the object deletion
        del agent_type
        self.assertTrue(os.path.exists(path ) )
class _UpperCAmelCase ( unittest.TestCase):
def _snake_case ( self : Any ):
        string = "Hey!"
        agent_type = AgentText(string )
        self.assertEqual(string , agent_type.to_string() )
        self.assertEqual(string , agent_type.to_raw() )
        self.assertEqual(string , agent_type )
| 264 | 0 |
'''simple docstring'''
import json
import os
import shutil
import tempfile
import unittest
from multiprocessing import get_context
from pathlib import Path
import datasets
import numpy as np
from datasets import load_dataset
from parameterized import parameterized
from transformers import AutoProcessor
from transformers.models.wav2vec2 import Wav2Vec2CTCTokenizer, Wav2Vec2FeatureExtractor
from transformers.models.wav2vec2.tokenization_wav2vec2 import VOCAB_FILES_NAMES
from transformers.testing_utils import require_pyctcdecode, require_torch, require_torchaudio, slow
from transformers.utils import FEATURE_EXTRACTOR_NAME, is_pyctcdecode_available, is_torch_available

from ..wav2vec2.test_feature_extraction_wav2vec2 import floats_list
if is_pyctcdecode_available():
from huggingface_hub import snapshot_download
from pyctcdecode import BeamSearchDecoderCTC
    from transformers.models.wav2vec2_with_lm import Wav2Vec2ProcessorWithLM
    from transformers.models.wav2vec2_with_lm.processing_wav2vec2_with_lm import Wav2Vec2DecoderWithLMOutput

if is_torch_available():
    from transformers import Wav2Vec2ForCTC
@require_pyctcdecode
class Wav2Vec2ProcessorWithLMTest(unittest.TestCase):
    def setUp(self):
        vocab = "| <pad> <unk> <s> </s> a b c d e f g h i j k".split()
        vocab_tokens = dict(zip(vocab, range(len(vocab))))

        self.add_kwargs_tokens_map = {
            "unk_token": "<unk>",
            "bos_token": "<s>",
            "eos_token": "</s>",
        }
        feature_extractor_map = {
            "feature_size": 1,
            "padding_value": 0.0,
            "sampling_rate": 16_000,
            "return_attention_mask": False,
            "do_normalize": True,
        }

        self.tmpdirname = tempfile.mkdtemp()
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.feature_extraction_file = os.path.join(self.tmpdirname, FEATURE_EXTRACTOR_NAME)
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.feature_extraction_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(feature_extractor_map) + "\n")

        # load decoder from hub
        self.decoder_name = "hf-internal-testing/ngram-beam-search-decoder"
    def get_tokenizer(self, **kwargs_init):
        kwargs = self.add_kwargs_tokens_map.copy()
        kwargs.update(kwargs_init)
        return Wav2Vec2CTCTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_feature_extractor(self, **kwargs):
        return Wav2Vec2FeatureExtractor.from_pretrained(self.tmpdirname, **kwargs)

    def get_decoder(self, **kwargs):
        return BeamSearchDecoderCTC.load_from_hf_hub(self.decoder_name, **kwargs)

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)
    def test_save_load_pretrained_default(self):
        tokenizer = self.get_tokenizer()
        feature_extractor = self.get_feature_extractor()
        decoder = self.get_decoder()

        processor = Wav2Vec2ProcessorWithLM(tokenizer=tokenizer, feature_extractor=feature_extractor, decoder=decoder)
        processor.save_pretrained(self.tmpdirname)
        processor = Wav2Vec2ProcessorWithLM.from_pretrained(self.tmpdirname)

        # tokenizer
        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer.get_vocab())
        self.assertIsInstance(processor.tokenizer, Wav2Vec2CTCTokenizer)

        # feature extractor
        self.assertEqual(processor.feature_extractor.to_json_string(), feature_extractor.to_json_string())
        self.assertIsInstance(processor.feature_extractor, Wav2Vec2FeatureExtractor)

        # decoder
        self.assertEqual(processor.decoder._alphabet.labels, decoder._alphabet.labels)
        self.assertEqual(
            processor.decoder.model_container[decoder._model_key]._unigram_set,
            decoder.model_container[decoder._model_key]._unigram_set,
        )
        self.assertIsInstance(processor.decoder, BeamSearchDecoderCTC)
    def test_save_load_pretrained_additional_features(self):
        processor = Wav2Vec2ProcessorWithLM(
            tokenizer=self.get_tokenizer(), feature_extractor=self.get_feature_extractor(), decoder=self.get_decoder()
        )
        processor.save_pretrained(self.tmpdirname)

        # override the decoder's default parameters when loading back
        processor = Wav2Vec2ProcessorWithLM.from_pretrained(
            self.tmpdirname, alpha=5.0, beta=3.0, score_boundary=-7.0, unk_score_offset=3
        )

        # decoder
        self.assertEqual(processor.language_model.alpha, 5.0)
        self.assertEqual(processor.language_model.beta, 3.0)
        self.assertEqual(processor.language_model.score_boundary, -7.0)
        self.assertEqual(processor.language_model.unk_score_offset, 3)
    def test_load_decoder_tokenizer_mismatch_raises(self):
        tokenizer = self.get_tokenizer()
        # add token to trigger raise
        tokenizer.add_tokens(["xx"])
        with self.assertRaisesRegex(ValueError, "include"):
            Wav2Vec2ProcessorWithLM(
                tokenizer=tokenizer, feature_extractor=self.get_feature_extractor(), decoder=self.get_decoder()
            )
    def test_feature_extractor(self):
        feature_extractor = self.get_feature_extractor()
        tokenizer = self.get_tokenizer()
        decoder = self.get_decoder()

        processor = Wav2Vec2ProcessorWithLM(tokenizer=tokenizer, feature_extractor=feature_extractor, decoder=decoder)

        raw_speech = floats_list((3, 1_000))

        input_feat_extract = feature_extractor(raw_speech, return_tensors="np")
        input_processor = processor(raw_speech, return_tensors="np")

        for key in input_feat_extract.keys():
            self.assertAlmostEqual(input_feat_extract[key].sum(), input_processor[key].sum(), delta=1e-2)

    def test_tokenizer(self):
        feature_extractor = self.get_feature_extractor()
        tokenizer = self.get_tokenizer()
        decoder = self.get_decoder()

        processor = Wav2Vec2ProcessorWithLM(tokenizer=tokenizer, feature_extractor=feature_extractor, decoder=decoder)

        input_str = "This is a test string"
        encoded_processor = processor(text=input_str)
        encoded_tok = tokenizer(input_str)

        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key], encoded_processor[key])
    def _get_dummy_logits(self, shape=(2, 10, 16), seed=77):
        np.random.seed(seed)
        return np.random.rand(*shape)
    def test_decoder(self):
        feature_extractor = self.get_feature_extractor()
        tokenizer = self.get_tokenizer()
        decoder = self.get_decoder()

        processor = Wav2Vec2ProcessorWithLM(tokenizer=tokenizer, feature_extractor=feature_extractor, decoder=decoder)

        logits = self._get_dummy_logits(shape=(10, 16), seed=13)

        decoded_processor = processor.decode(logits)
        decoded_decoder = decoder.decode_beams(logits)[0]

        self.assertEqual(decoded_decoder[0], decoded_processor.text)
        self.assertEqual("</s> <s> </s>", decoded_processor.text)
        self.assertEqual(decoded_decoder[-2], decoded_processor.logit_score)
        self.assertEqual(decoded_decoder[-1], decoded_processor.lm_score)
    @parameterized.expand([[None], ["fork"], ["spawn"]])
    def test_decoder_batch(self, pool_context):
        feature_extractor = self.get_feature_extractor()
        tokenizer = self.get_tokenizer()
        decoder = self.get_decoder()

        processor = Wav2Vec2ProcessorWithLM(tokenizer=tokenizer, feature_extractor=feature_extractor, decoder=decoder)

        logits = self._get_dummy_logits()

        # note: pool should be instantiated *after* Wav2Vec2ProcessorWithLM.
        # otherwise, the LM won't be available to the pool's sub-processes.
        # manual logic used to allow parameterized test for both pool=None and pool=Pool(...)
        if pool_context is None:
            decoded_processor = processor.batch_decode(logits)
        else:
            with get_context(pool_context).Pool() as pool:
                decoded_processor = processor.batch_decode(logits, pool)

        logits_list = list(logits)

        with get_context("fork").Pool() as p:
            decoded_beams = decoder.decode_beams_batch(p, logits_list)

        texts_decoder, logit_scores_decoder, lm_scores_decoder = [], [], []
        for beams in decoded_beams:
            texts_decoder.append(beams[0][0])
            logit_scores_decoder.append(beams[0][-2])
            lm_scores_decoder.append(beams[0][-1])

        self.assertListEqual(texts_decoder, decoded_processor.text)
        self.assertListEqual(["<s> <s> </s>", "<s> <s> <s>"], decoded_processor.text)
        self.assertListEqual(logit_scores_decoder, decoded_processor.logit_score)
        self.assertListEqual(lm_scores_decoder, decoded_processor.lm_score)
    def test_decoder_with_params(self):
        feature_extractor = self.get_feature_extractor()
        tokenizer = self.get_tokenizer()
        decoder = self.get_decoder()

        processor = Wav2Vec2ProcessorWithLM(tokenizer=tokenizer, feature_extractor=feature_extractor, decoder=decoder)

        logits = self._get_dummy_logits()

        beam_width = 15
        beam_prune_logp = -20.0
        token_min_logp = -4.0

        decoded_processor_out = processor.batch_decode(
            logits, beam_width=beam_width, beam_prune_logp=beam_prune_logp, token_min_logp=token_min_logp
        )
        decoded_processor = decoded_processor_out.text

        logits_list = list(logits)

        with get_context("fork").Pool() as pool:
            decoded_decoder_out = decoder.decode_beams_batch(
                pool, logits_list, beam_width=beam_width, beam_prune_logp=beam_prune_logp, token_min_logp=token_min_logp
            )

        decoded_decoder = [d[0][0] for d in decoded_decoder_out]
        logit_scores = [d[0][2] for d in decoded_decoder_out]
        lm_scores = [d[0][3] for d in decoded_decoder_out]

        self.assertListEqual(decoded_decoder, decoded_processor)
        self.assertListEqual(["</s> <s> <s>", "<s> <s> <s>"], decoded_processor)

        self.assertTrue(np.array_equal(logit_scores, decoded_processor_out.logit_score))
        self.assertTrue(np.allclose([-20.054, -18.447], logit_scores, atol=1e-3))

        self.assertTrue(np.array_equal(lm_scores, decoded_processor_out.lm_score))
        self.assertTrue(np.allclose([-15.554, -13.9474], lm_scores, atol=1e-3))
    def test_decoder_with_params_of_lm(self):
        feature_extractor = self.get_feature_extractor()
        tokenizer = self.get_tokenizer()
        decoder = self.get_decoder()

        processor = Wav2Vec2ProcessorWithLM(tokenizer=tokenizer, feature_extractor=feature_extractor, decoder=decoder)

        logits = self._get_dummy_logits()

        alpha = 2.0
        beta = 5.0
        unk_score_offset = -20.0
        lm_score_boundary = True

        decoded_processor_out = processor.batch_decode(
            logits, alpha=alpha, beta=beta, unk_score_offset=unk_score_offset, lm_score_boundary=lm_score_boundary
        )
        decoded_processor = decoded_processor_out.text

        logits_list = list(logits)
        decoder.reset_params(
            alpha=alpha, beta=beta, unk_score_offset=unk_score_offset, lm_score_boundary=lm_score_boundary
        )

        with get_context("fork").Pool() as pool:
            decoded_decoder_out = decoder.decode_beams_batch(pool, logits_list)

        decoded_decoder = [d[0][0] for d in decoded_decoder_out]

        self.assertListEqual(decoded_decoder, decoded_processor)
        self.assertListEqual(["<s> </s> <s> </s> </s>", "</s> </s> <s> </s> </s>"], decoded_processor)

        lm_model = processor.decoder.model_container[processor.decoder._model_key]
        self.assertEqual(lm_model.alpha, 2.0)
        self.assertEqual(lm_model.beta, 5.0)
        self.assertEqual(lm_model.unk_score_offset, -20.0)
        self.assertEqual(lm_model.score_boundary, True)
    def test_decoder_download_ignores_files(self):
        processor = Wav2Vec2ProcessorWithLM.from_pretrained("hf-internal-testing/processor_with_lm")

        language_model = processor.decoder.model_container[processor.decoder._model_key]
        path_to_cached_dir = Path(language_model._kenlm_model.path.decode("utf-8")).parent.parent.absolute()

        downloaded_decoder_files = os.listdir(path_to_cached_dir)
        expected_decoder_files = ["alphabet.json", "language_model"]

        downloaded_decoder_files.sort()
        expected_decoder_files.sort()

        # test that only decoder relevant files from
        # https://huggingface.co/hf-internal-testing/processor_with_lm/tree/main
        # are downloaded and none of the rest (e.g. README.md, ...)
        self.assertListEqual(downloaded_decoder_files, expected_decoder_files)

    def test_decoder_local_files(self):
        local_dir = snapshot_download("hf-internal-testing/processor_with_lm")
        processor = Wav2Vec2ProcessorWithLM.from_pretrained(local_dir)

        language_model = processor.decoder.model_container[processor.decoder._model_key]
        path_to_cached_dir = Path(language_model._kenlm_model.path.decode("utf-8")).parent.parent.absolute()

        local_decoder_files = os.listdir(local_dir)
        expected_decoder_files = os.listdir(path_to_cached_dir)

        local_decoder_files.sort()
        expected_decoder_files.sort()

        # test that both the decoder from the hub and the local files in cache are the same
        self.assertListEqual(local_decoder_files, expected_decoder_files)
    def test_processor_from_auto_processor(self):
        processor_wav2vec2 = Wav2Vec2ProcessorWithLM.from_pretrained("hf-internal-testing/processor_with_lm")
        processor_auto = AutoProcessor.from_pretrained("hf-internal-testing/processor_with_lm")

        raw_speech = floats_list((3, 1_000))

        input_wav2vec2 = processor_wav2vec2(raw_speech, return_tensors="np")
        input_auto = processor_auto(raw_speech, return_tensors="np")

        for key in input_wav2vec2.keys():
            self.assertAlmostEqual(input_wav2vec2[key].sum(), input_auto[key].sum(), delta=1e-2)

        logits = self._get_dummy_logits()

        decoded_wav2vec2 = processor_wav2vec2.batch_decode(logits)
        decoded_auto = processor_auto.batch_decode(logits)

        self.assertListEqual(decoded_wav2vec2.text, decoded_auto.text)

    def test_model_input_names(self):
        feature_extractor = self.get_feature_extractor()
        tokenizer = self.get_tokenizer()
        decoder = self.get_decoder()

        processor = Wav2Vec2ProcessorWithLM(tokenizer=tokenizer, feature_extractor=feature_extractor, decoder=decoder)
        self.assertListEqual(
            processor.model_input_names,
            feature_extractor.model_input_names,
            msg="`processor` and `feature_extractor` model input names do not match",
        )

    @staticmethod
    def get_from_offsets(offsets, key):
        retrieved_list = [d[key] for d in offsets]
        return retrieved_list
    def test_offsets_integration_fast(self):
        processor = Wav2Vec2ProcessorWithLM.from_pretrained("hf-internal-testing/processor_with_lm")
        logits = self._get_dummy_logits()[0]
        outputs = processor.decode(logits, output_word_offsets=True)

        # check Wav2Vec2CTCTokenizerOutput keys for word
        self.assertEqual(len(outputs.keys()), 4)
        self.assertTrue("text" in outputs)
        self.assertTrue("word_offsets" in outputs)
        self.assertTrue(isinstance(outputs, Wav2Vec2DecoderWithLMOutput))

        self.assertEqual(" ".join(self.get_from_offsets(outputs["word_offsets"], "word")), outputs.text)
        self.assertListEqual(self.get_from_offsets(outputs["word_offsets"], "word"), ["<s>", "<s>", "</s>"])
        self.assertListEqual(self.get_from_offsets(outputs["word_offsets"], "start_offset"), [0, 2, 4])
        self.assertListEqual(self.get_from_offsets(outputs["word_offsets"], "end_offset"), [1, 3, 5])

    def test_offsets_integration_fast_batch(self):
        processor = Wav2Vec2ProcessorWithLM.from_pretrained("hf-internal-testing/processor_with_lm")
        logits = self._get_dummy_logits()
        outputs = processor.batch_decode(logits, output_word_offsets=True)

        # check Wav2Vec2CTCTokenizerOutput keys for word
        self.assertEqual(len(outputs.keys()), 4)
        self.assertTrue("text" in outputs)
        self.assertTrue("word_offsets" in outputs)
        self.assertTrue(isinstance(outputs, Wav2Vec2DecoderWithLMOutput))

        self.assertListEqual(
            [" ".join(self.get_from_offsets(o, "word")) for o in outputs["word_offsets"]], outputs.text
        )
        self.assertListEqual(self.get_from_offsets(outputs["word_offsets"][0], "word"), ["<s>", "<s>", "</s>"])
        self.assertListEqual(self.get_from_offsets(outputs["word_offsets"][0], "start_offset"), [0, 2, 4])
        self.assertListEqual(self.get_from_offsets(outputs["word_offsets"][0], "end_offset"), [1, 3, 5])
@slow
@require_torch
@require_torchaudio
    def test_word_time_stamp_integration(self):
        import torch

        ds = load_dataset("common_voice", "en", split="train", streaming=True)
        ds = ds.cast_column("audio", datasets.Audio(sampling_rate=16_000))
        ds_iter = iter(ds)
        sample = next(ds_iter)

        processor = AutoProcessor.from_pretrained("patrickvonplaten/wav2vec2-base-100h-with-lm")
        model = Wav2Vec2ForCTC.from_pretrained("patrickvonplaten/wav2vec2-base-100h-with-lm")

        # compare to filename `common_voice_en_100038.mp3` of dataset viewer on https://huggingface.co/datasets/common_voice/viewer/en/train
        input_values = processor(sample["audio"]["array"], return_tensors="pt").input_values

        with torch.no_grad():
            logits = model(input_values).logits.cpu().numpy()

        output = processor.decode(logits[0], output_word_offsets=True)

        time_offset = model.config.inputs_to_logits_ratio / processor.feature_extractor.sampling_rate
        word_time_stamps = [
            {
                "start_time": d["start_offset"] * time_offset,
                "end_time": d["end_offset"] * time_offset,
                "word": d["word"],
            }
            for d in output["word_offsets"]
        ]

        EXPECTED_TEXT = "WHY DOES MILISANDRA LOOK LIKE SHE WANTS TO CONSUME JOHN SNOW ON THE RIVER AT THE WALL"

        # output words
        self.assertEqual(" ".join(self.get_from_offsets(word_time_stamps, "word")), EXPECTED_TEXT)
        self.assertEqual(" ".join(self.get_from_offsets(word_time_stamps, "word")), output.text)

        # output times
        start_times = torch.tensor(self.get_from_offsets(word_time_stamps, "start_time"))
        end_times = torch.tensor(self.get_from_offsets(word_time_stamps, "end_time"))

        # fmt: off
        expected_start_times = torch.tensor([1.4199, 1.6599, 2.2599, 3.0, 3.24, 3.5999, 3.7999, 4.0999, 4.26, 4.94, 5.28, 5.6599, 5.78, 5.94, 6.32, 6.5399, 6.6599])
        expected_end_times = torch.tensor([1.5399, 1.8999, 2.9, 3.16, 3.5399, 3.72, 4.0199, 4.1799, 4.76, 5.1599, 5.5599, 5.6999, 5.86, 6.1999, 6.38, 6.6199, 6.94])
        # fmt: on

        self.assertTrue(torch.allclose(start_times, expected_start_times, atol=0.01))
        self.assertTrue(torch.allclose(end_times, expected_end_times, atol=0.01))
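A hedged usage sketch of the batch-decoding pattern the pool tests above enforce; it reuses the module-level imports (get_context, Wav2Vec2ProcessorWithLM) and the same public test fixture, and the helper name is illustrative.

def _batch_decode_with_pool(logits):
    # Create the pool only *after* the processor, so the decoder's LM is visible to the workers.
    processor = Wav2Vec2ProcessorWithLM.from_pretrained("hf-internal-testing/processor_with_lm")
    with get_context("fork").Pool() as pool:
        return processor.batch_decode(logits, pool).text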
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
'''configuration_x_clip''': [
'''XCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''XCLIPConfig''',
'''XCLIPTextConfig''',
'''XCLIPVisionConfig''',
],
'''processing_x_clip''': ['''XCLIPProcessor'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_x_clip"] = [
'''XCLIP_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''XCLIPModel''',
'''XCLIPPreTrainedModel''',
'''XCLIPTextModel''',
'''XCLIPVisionModel''',
]
if TYPE_CHECKING:
from .configuration_x_clip import (
XCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
XCLIPConfig,
XCLIPTextConfig,
XCLIPVisionConfig,
)
from .processing_x_clip import XCLIPProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_x_clip import (
XCLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
XCLIPModel,
XCLIPPreTrainedModel,
XCLIPTextModel,
XCLIPVisionModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
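A usage sketch (not part of the module itself): with the lazy structure above, importing a config class never pulls in torch, while the model classes are resolved only on first attribute access.

# Hedged sketch of what a consumer of the lazy module above sees (illustrative helper name).
from transformers.models.x_clip import XCLIPConfig  # config import never needs torch

def _build_default_xclip():
    from transformers.models.x_clip import XCLIPModel  # resolved lazily; requires torch
    return XCLIPModel(XCLIPConfig())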
"""simple docstring"""
from typing import Any, Dict, List, Optional, Tuple, Union
import torch
from torch import nn
from torch.utils.data import DistributedSampler, RandomSampler
from transformers import PreTrainedModel, Trainer, logging
from transformers.integrations import is_fairscale_available
from transformers.models.fsmt.configuration_fsmt import FSMTConfig
from transformers.optimization import (
Adafactor,
AdamW,
get_constant_schedule,
get_constant_schedule_with_warmup,
get_cosine_schedule_with_warmup,
get_cosine_with_hard_restarts_schedule_with_warmup,
get_linear_schedule_with_warmup,
get_polynomial_decay_schedule_with_warmup,
)
from transformers.trainer_pt_utils import get_tpu_sampler
from transformers.training_args import ParallelMode
from transformers.utils import is_torch_tpu_available
if is_fairscale_available():
from fairscale.optim import OSS
logger = logging.get_logger(__name__)

arg_to_scheduler = {
    "linear": get_linear_schedule_with_warmup,
    "cosine": get_cosine_schedule_with_warmup,
    "cosine_w_restarts": get_cosine_with_hard_restarts_schedule_with_warmup,
    "polynomial": get_polynomial_decay_schedule_with_warmup,
    "constant": get_constant_schedule,
    "constant_w_warmup": get_constant_schedule_with_warmup,
}
class Seq2SeqTrainer(Trainer):
    def __init__(self, config=None, data_args=None, *args, **kwargs):
        super().__init__(*args, **kwargs)

        if config is None:
            assert isinstance(self.model, PreTrainedModel), (
                "If no `config` is passed the model to be trained has to be of type `PreTrainedModel`, but is"
                f" {self.model.__class__}"
            )
            self.config = self.model.config
        else:
            self.config = config

        self.data_args = data_args
        self.vocab_size = self.config.tgt_vocab_size if isinstance(self.config, FSMTConfig) else self.config.vocab_size

        if self.args.label_smoothing != 0 or (self.data_args is not None and self.data_args.ignore_pad_token_for_loss):
            assert self.config.pad_token_id is not None, (
                "Make sure that `config.pad_token_id` is correctly defined when ignoring `pad_token` for loss"
                " calculation or doing label smoothing."
            )

        if self.config.pad_token_id is None and self.config.eos_token_id is not None:
            logger.warning(
                f"The `config.pad_token_id` is `None`. Using `config.eos_token_id` = {self.config.eos_token_id} for"
                " padding.."
            )

        if self.args.label_smoothing == 0:
            self.loss_fn = torch.nn.CrossEntropyLoss(ignore_index=self.config.pad_token_id)
        else:
            # dynamically import label_smoothed_nll_loss
            from utils import label_smoothed_nll_loss

            self.loss_fn = label_smoothed_nll_loss
    def create_optimizer_and_scheduler(self, num_training_steps):
        if self.optimizer is None:
            no_decay = ["bias", "LayerNorm.weight"]
            optimizer_grouped_parameters = [
                {
                    "params": [p for n, p in self.model.named_parameters() if not any(nd in n for nd in no_decay)],
                    "weight_decay": self.args.weight_decay,
                },
                {
                    "params": [p for n, p in self.model.named_parameters() if any(nd in n for nd in no_decay)],
                    "weight_decay": 0.0,
                },
            ]
            if self.args.adafactor:
                optimizer_cls = Adafactor
                optimizer_kwargs = {"scale_parameter": False, "relative_step": False}
            else:
                optimizer_cls = AdamW
                optimizer_kwargs = {
                    "betas": (self.args.adam_beta1, self.args.adam_beta2),
                    "eps": self.args.adam_epsilon,
                }
            optimizer_kwargs["lr"] = self.args.learning_rate
            if self.sharded_ddp:
                self.optimizer = OSS(
                    params=optimizer_grouped_parameters,
                    optim=optimizer_cls,
                    **optimizer_kwargs,
                )
            else:
                self.optimizer = optimizer_cls(optimizer_grouped_parameters, **optimizer_kwargs)

        if self.lr_scheduler is None:
            self.lr_scheduler = self._get_lr_scheduler(num_training_steps)
        else:  # ignoring --lr_scheduler
            logger.warning("scheduler is passed to `Seq2SeqTrainer`, `--lr_scheduler` arg is ignored.")
    def _get_lr_scheduler(self, num_training_steps):
        schedule_func = arg_to_scheduler[self.args.lr_scheduler]
        if self.args.lr_scheduler == "constant":
            scheduler = schedule_func(self.optimizer)
        elif self.args.lr_scheduler == "constant_w_warmup":
            scheduler = schedule_func(self.optimizer, num_warmup_steps=self.args.warmup_steps)
        else:
            scheduler = schedule_func(
                self.optimizer, num_warmup_steps=self.args.warmup_steps, num_training_steps=num_training_steps
            )
        return scheduler
    def _get_train_sampler(self):
if isinstance(self.train_dataset , torch.utils.data.IterableDataset ):
return None
elif is_torch_tpu_available():
return get_tpu_sampler(self.train_dataset )
else:
if self.args.sortish_sampler:
self.train_dataset.make_sortish_sampler(
self.args.per_device_train_batch_size , distributed=(self.args.parallel_mode == ParallelMode.DISTRIBUTED) , )
return (
RandomSampler(self.train_dataset )
if self.args.local_rank == -1
else DistributedSampler(self.train_dataset )
)
    def _compute_loss(self, model, inputs, labels):
        if self.args.label_smoothing == 0:
            if self.data_args is not None and self.data_args.ignore_pad_token_for_loss:
                # force training to ignore pad token
                logits = model(**inputs, use_cache=False)[0]
                loss = self.loss_fn(logits.view(-1, logits.shape[-1]), labels.view(-1))
            else:
                # compute usual loss via models
                loss, logits = model(**inputs, labels=labels, use_cache=False)[:2]
        else:
            # compute label smoothed loss
            logits = model(**inputs, use_cache=False)[0]
            lprobs = torch.nn.functional.log_softmax(logits, dim=-1)
            loss, _ = self.loss_fn(lprobs, labels, self.args.label_smoothing, ignore_index=self.config.pad_token_id)
        return loss, logits

    def compute_loss(self, model, inputs):
        labels = inputs.pop("labels")
        loss, _ = self._compute_loss(model, inputs, labels)
        return loss
    def prediction_step(self, model, inputs, prediction_loss_only, ignore_keys=None):
        inputs = self._prepare_inputs(inputs)

        gen_kwargs = {
            "max_length": self.data_args.val_max_target_length
            if self.data_args is not None
            else self.config.max_length,
            "num_beams": self.data_args.eval_beams if self.data_args is not None else self.config.num_beams,
        }

        if self.args.predict_with_generate and not self.args.prediction_loss_only:
            generated_tokens = self.model.generate(
                inputs["input_ids"],
                attention_mask=inputs["attention_mask"],
                **gen_kwargs,
            )
            # in case the batch is shorter than max length, the output should be padded
            if generated_tokens.shape[-1] < gen_kwargs["max_length"]:
                generated_tokens = self._pad_tensors_to_max_len(generated_tokens, gen_kwargs["max_length"])

        labels = inputs.pop("labels")
        with torch.no_grad():
            # compute loss on predict data
            loss, logits = self._compute_loss(model, inputs, labels)

        loss = loss.mean().detach()
        if self.args.prediction_loss_only:
            return (loss, None, None)

        logits = generated_tokens if self.args.predict_with_generate else logits
        if labels.shape[-1] < gen_kwargs["max_length"]:
            labels = self._pad_tensors_to_max_len(labels, gen_kwargs["max_length"])

        return (loss, logits, labels)
    def _pad_tensors_to_max_len(self, tensor, max_length):
        # If PAD token is not defined at least EOS token has to be defined
        pad_token_id = self.config.pad_token_id if self.config.pad_token_id is not None else self.config.eos_token_id

        if pad_token_id is None:
            raise ValueError(
                "Make sure that either `config.pad_token_id` or `config.eos_token_id` is defined if tensor has to be"
                f" padded to `max_length`={max_length}"
            )

        padded_tensor = pad_token_id * torch.ones(
            (tensor.shape[0], max_length), dtype=tensor.dtype, device=tensor.device
        )
        padded_tensor[:, : tensor.shape[-1]] = tensor
        return padded_tensor
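A minimal sketch of the arg_to_scheduler dispatch defined above, using a warmup-style entry; the learning rate and step counts are illustrative values, and the helper name is not part of the trainer.

# Hedged sketch of the scheduler dispatch table above; assumes a warmup-style scheduler
# name such as "linear" (the "constant" entry takes only the optimizer).
def _build_optimizer_and_scheduler(model, scheduler_name="linear", lr=3e-5, warmup_steps=100, total_steps=1_000):
    optimizer = AdamW(model.parameters(), lr=lr)
    schedule_func = arg_to_scheduler[scheduler_name]
    scheduler = schedule_func(optimizer, num_warmup_steps=warmup_steps, num_training_steps=total_steps)
    return optimizer, scheduler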
"""simple docstring"""
import pickle
import shutil
import tempfile
import unittest
from transformers import SPIECE_UNDERLINE, XLMRobertaTokenizer, XLMRobertaTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece.model")
@require_sentencepiece
@require_tokenizers
class XLMRobertaTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = XLMRobertaTokenizer
    rust_tokenizer_class = XLMRobertaTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True

    def setUp(self):
        super().setUp()

        # We have a SentencePiece fixture for testing
        tokenizer = XLMRobertaTokenizer(SAMPLE_VOCAB, keep_accents=True)
        tokenizer.save_pretrained(self.tmpdirname)
    def test_convert_token_and_id(self):
        token = "<pad>"
        token_id = 1

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)

    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())

        self.assertEqual(vocab_keys[0], "<s>")
        self.assertEqual(vocab_keys[1], "<pad>")
        self.assertEqual(vocab_keys[-1], "<mask>")
        self.assertEqual(len(vocab_keys), 1002)

    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 1002)
    def test_full_tokenizer(self):
        tokenizer = XLMRobertaTokenizer(SAMPLE_VOCAB, keep_accents=True)

        tokens = tokenizer.tokenize("This is a test")
        self.assertListEqual(tokens, ["▁This", "▁is", "▁a", "▁t", "est"])

        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens),
            [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]],
        )

        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.")
        self.assertListEqual(
            tokens,
            [
SPIECE_UNDERLINE + '''I''',
SPIECE_UNDERLINE + '''was''',
SPIECE_UNDERLINE + '''b''',
'''or''',
'''n''',
SPIECE_UNDERLINE + '''in''',
SPIECE_UNDERLINE + '''''',
'''9''',
'''2''',
'''0''',
'''0''',
'''0''',
''',''',
SPIECE_UNDERLINE + '''and''',
SPIECE_UNDERLINE + '''this''',
SPIECE_UNDERLINE + '''is''',
SPIECE_UNDERLINE + '''f''',
'''al''',
'''s''',
'''é''',
'''.''',
] , )
        ids = tokenizer.convert_tokens_to_ids(tokens)
        self.assertListEqual(
            ids,
            [
value + tokenizer.fairseq_offset
for value in [8, 21, 84, 55, 24, 19, 7, 2, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 2, 4]
# ^ unk: 2 + 1 = 3 unk: 2 + 1 = 3 ^
] , )
        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(
            back_tokens,
            [
SPIECE_UNDERLINE + '''I''',
SPIECE_UNDERLINE + '''was''',
SPIECE_UNDERLINE + '''b''',
'''or''',
'''n''',
SPIECE_UNDERLINE + '''in''',
SPIECE_UNDERLINE + '''''',
'''<unk>''',
'''2''',
'''0''',
'''0''',
'''0''',
''',''',
SPIECE_UNDERLINE + '''and''',
SPIECE_UNDERLINE + '''this''',
SPIECE_UNDERLINE + '''is''',
SPIECE_UNDERLINE + '''f''',
'''al''',
'''s''',
'''<unk>''',
'''.''',
] , )
    def test_save_pretrained(self):
        if not self.test_slow_tokenizer:
            # as we don't have a slow version, we can't compare the outputs between slow and fast versions
            return

        self.tokenizers_list = [(self.rust_tokenizer_class, "hf-internal-testing/tiny-xlm-roberta", {})]
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)

                tmpdirname2 = tempfile.mkdtemp()

                tokenizer_r_files = tokenizer_r.save_pretrained(tmpdirname2)
                tokenizer_p_files = tokenizer_p.save_pretrained(tmpdirname2)

                # Checks it save with the same files + the tokenizer.json file for the fast one
                self.assertTrue(any("tokenizer.json" in f for f in tokenizer_r_files))
                tokenizer_r_files = tuple(f for f in tokenizer_r_files if "tokenizer.json" not in f)
                self.assertSequenceEqual(tokenizer_r_files, tokenizer_p_files)

                # Checks everything loads correctly in the same way
                tokenizer_rp = tokenizer_r.from_pretrained(tmpdirname2)
                tokenizer_pp = tokenizer_p.from_pretrained(tmpdirname2)

                # Check special tokens are set accordingly on Rust and Python
                for key in tokenizer_pp.special_tokens_map:
                    self.assertTrue(hasattr(tokenizer_rp, key))
                    # self.assertEqual(getattr(tokenizer_rp, key), getattr(tokenizer_pp, key))
                    # self.assertEqual(getattr(tokenizer_rp, key + "_id"), getattr(tokenizer_pp, key + "_id"))

                shutil.rmtree(tmpdirname2)

                # Save tokenizer rust, legacy_format=True
                tmpdirname2 = tempfile.mkdtemp()

                tokenizer_r_files = tokenizer_r.save_pretrained(tmpdirname2, legacy_format=True)
                tokenizer_p_files = tokenizer_p.save_pretrained(tmpdirname2)

                # Checks it save with the same files
                self.assertSequenceEqual(tokenizer_r_files, tokenizer_p_files)

                # Checks everything loads correctly in the same way
                tokenizer_rp = tokenizer_r.from_pretrained(tmpdirname2)
                tokenizer_pp = tokenizer_p.from_pretrained(tmpdirname2)

                # Check special tokens are set accordingly on Rust and Python
                for key in tokenizer_pp.special_tokens_map:
                    self.assertTrue(hasattr(tokenizer_rp, key))

                shutil.rmtree(tmpdirname2)

                # Save tokenizer rust, legacy_format=False
                tmpdirname2 = tempfile.mkdtemp()

                tokenizer_r_files = tokenizer_r.save_pretrained(tmpdirname2, legacy_format=False)
                tokenizer_p_files = tokenizer_p.save_pretrained(tmpdirname2)

                # Checks it saved the tokenizer.json file
                self.assertTrue(any("tokenizer.json" in f for f in tokenizer_r_files))

                # Checks everything loads correctly in the same way
                tokenizer_rp = tokenizer_r.from_pretrained(tmpdirname2)
                tokenizer_pp = tokenizer_p.from_pretrained(tmpdirname2)

                # Check special tokens are set accordingly on Rust and Python
                for key in tokenizer_pp.special_tokens_map:
                    self.assertTrue(hasattr(tokenizer_rp, key))

                shutil.rmtree(tmpdirname2)
    @cached_property
    def big_tokenizer(self):
        return XLMRobertaTokenizer.from_pretrained("xlm-roberta-base")

    def test_picklable_without_disk(self):
        with tempfile.NamedTemporaryFile() as f:
            shutil.copyfile(SAMPLE_VOCAB, f.name)
            tokenizer = XLMRobertaTokenizer(f.name, keep_accents=True)
            pickled_tokenizer = pickle.dumps(tokenizer)
            pickle.loads(pickled_tokenizer)
    def test_rust_and_python_full_tokenizers(self):
        if not self.test_rust_tokenizer:
            return

        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()

        sequence = "I was born in 92000, and this is falsé."

        tokens = tokenizer.tokenize(sequence)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)

        ids = tokenizer.encode(sequence, add_special_tokens=False)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)

        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)
    @slow
    def test_tokenization_base_easy_symbols(self):
        symbols = "Hello World!"
        original_tokenizer_encodings = [0, 35378, 6661, 38, 2]
        # xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.base') # xlmr.large has same tokenizer
        # xlmr.eval()
        # xlmr.encode(symbols)

        self.assertListEqual(original_tokenizer_encodings, self.big_tokenizer.encode(symbols))

    @slow
    def test_tokenization_base_hard_symbols(self):
        symbols = (
            'This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) " [ ] ! : - . Also we will'
            " add words that should not exsist and be tokenized to <unk>, such as saoneuhaoesuth"
        )
        original_tokenizer_encodings = [
0,
3293,
83,
10,
4552,
4989,
7986,
678,
10,
5915,
111,
179459,
124850,
4,
6044,
237,
12,
6,
5,
6,
4,
6780,
705,
15,
1388,
44,
378,
10114,
711,
152,
20,
6,
5,
22376,
642,
1221,
15190,
34153,
450,
5608,
959,
1119,
57702,
136,
186,
47,
1098,
29367,
47,
# 4426, # What fairseq tokenizes from "<unk>": "_<"
# 3678, # What fairseq tokenizes from "<unk>": "unk"
# 2740, # What fairseq tokenizes from "<unk>": ">"
3, # What we tokenize from "<unk>": "<unk>"
6, # Residue from the tokenization: an extra sentencepiece underline
4,
6044,
237,
6284,
50901,
528,
31,
90,
34,
927,
2,
]
        # xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.base') # xlmr.large has same tokenizer
        # xlmr.eval()
        # xlmr.encode(symbols)

        self.assertListEqual(original_tokenizer_encodings, self.big_tokenizer.encode(symbols))
    @slow
    def test_tokenizer_integration(self):
        # fmt: off
snake_case_ : int = {'''input_ids''': [[0, 11062, 82772, 7, 15, 82772, 538, 51529, 237, 17198, 1290, 206, 9, 215175, 1314, 136, 17198, 1290, 206, 9, 56359, 42, 122009, 9, 16466, 16, 87344, 4537, 9, 4717, 78381, 6, 159958, 7, 15, 24480, 618, 4, 527, 22693, 5428, 4, 2777, 24480, 9874, 4, 43523, 594, 4, 803, 18392, 33189, 18, 4, 43523, 24447, 12399, 100, 24955, 83658, 9626, 144057, 15, 839, 22335, 16, 136, 24955, 83658, 83479, 15, 39102, 724, 16, 678, 645, 2789, 1328, 4589, 42, 122009, 115774, 23, 805, 1328, 46876, 7, 136, 53894, 1940, 42227, 41159, 17721, 823, 425, 4, 27512, 98722, 206, 136, 5531, 4970, 919, 17336, 5, 2], [0, 20080, 618, 83, 82775, 47, 479, 9, 1517, 73, 53894, 333, 80581, 110117, 18811, 5256, 1295, 51, 152526, 297, 7986, 390, 124416, 538, 35431, 214, 98, 15044, 25737, 136, 7108, 43701, 23, 756, 135355, 7, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 581, 63773, 119455, 6, 147797, 88203, 7, 645, 70, 21, 3285, 10269, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]], '''attention_mask''': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
        # fmt: on

        self.tokenizer_integration_test_util(
            expected_encoding=snake_case_,  # the expected-encoding dict assigned above
            model_name="xlm-roberta-base",
            revision="d9d8a8ea5eb94b1c6654ae9249df7793cd2933d3",
        )
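A short sketch of the fairseq_offset arithmetic the full-tokenizer test above relies on: XLM-R reserves ids 0-3 for <s>/<pad>/</s>/<unk>, shifting every SentencePiece id by the offset. Downloading the public checkpoint is assumed; the helper name is illustrative.

# Hedged sketch of the fairseq id offset asserted above (requires network access).
def _show_fairseq_offset():
    tok = XLMRobertaTokenizer.from_pretrained("xlm-roberta-base")
    # ids 0..3 are reserved for <s>, <pad>, </s>, <unk>; SentencePiece ids shift by the offset
    print(tok.fairseq_offset, tok.convert_tokens_to_ids("<s>"), tok.convert_tokens_to_ids("<mask>"))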
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import torch
from ..models.clipseg import CLIPSegForImageSegmentation
from ..utils import is_vision_available, requires_backends
from .base import PipelineTool
if is_vision_available():
from PIL import Image
class ImageSegmentationTool(PipelineTool):
    description = (
        "This is a tool that creates a segmentation mask of an image according to a label. It cannot create an image."
        "It takes two arguments named `image` which should be the original image, and `label` which should be a text "
        "describing the elements that should be identified in the segmentation mask. The tool returns the mask."
    )
    default_checkpoint = "CIDAS/clipseg-rd64-refined"
    name = "image_segmenter"
    model_class = CLIPSegForImageSegmentation

    inputs = ["image", "text"]
    outputs = ["image"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["vision"])
        super().__init__(*args, **kwargs)

    def encode(self, image: "Image", label: str):
        return self.pre_processor(text=[label], images=[image], padding=True, return_tensors="pt")

    def forward(self, inputs):
        with torch.no_grad():
            logits = self.model(**inputs).logits
        return logits

    def decode(self, outputs):
        array = outputs.cpu().detach().numpy()
        # binarize the logits: anything above zero belongs to the mask
        array[array <= 0] = 0
        array[array > 0] = 1
        return Image.fromarray((array * 255).astype(np.uint8))
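What the tool wraps, shown as a direct sketch against the same CLIPSeg checkpoint; the binarization mirrors the decode step above. The no-wrapper flow and the helper name are illustrative, not part of the tools API.

# Hedged sketch of the same segmentation flow without the PipelineTool wrapper.
def _segment_image(image, label):
    from transformers import CLIPSegProcessor

    processor = CLIPSegProcessor.from_pretrained("CIDAS/clipseg-rd64-refined")
    model = CLIPSegForImageSegmentation.from_pretrained("CIDAS/clipseg-rd64-refined")
    inputs = processor(text=[label], images=[image], padding=True, return_tensors="pt")
    with torch.no_grad():
        logits = model(**inputs).logits
    mask = (logits.cpu().detach().numpy() > 0).astype(np.uint8)  # same binarization as decode() above
    return Image.fromarray(mask * 255)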
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

GPT_NEOX_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'''EleutherAI/gpt-neox-20b''': '''https://huggingface.co/EleutherAI/gpt-neox-20b/resolve/main/config.json''',
# See all GPTNeoX models at https://huggingface.co/models?filter=gpt_neox
}
class GPTNeoXConfig(PretrainedConfig):
    model_type = "gpt_neox"

    def __init__(self, vocab_size=50432, hidden_size=6144, num_hidden_layers=44, num_attention_heads=64, intermediate_size=24576, hidden_act="gelu", rotary_pct=0.25, rotary_emb_base=10000, attention_dropout=0.0, hidden_dropout=0.0, classifier_dropout=0.1, max_position_embeddings=2048, initializer_range=0.02, layer_norm_eps=1e-5, use_cache=True, bos_token_id=0, eos_token_id=2, tie_word_embeddings=False, use_parallel_residual=True, rope_scaling=None, **kwargs):
        super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.rotary_pct = rotary_pct
        self.rotary_emb_base = rotary_emb_base
        self.attention_dropout = attention_dropout
        self.hidden_dropout = hidden_dropout
        self.classifier_dropout = classifier_dropout
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.use_cache = use_cache
        self.tie_word_embeddings = tie_word_embeddings
        self.use_parallel_residual = use_parallel_residual
        self.rope_scaling = rope_scaling
        self._rope_scaling_validation()
        if self.hidden_size % self.num_attention_heads != 0:
            raise ValueError(
                "The hidden size is not divisible by the number of attention heads! Make sure to update them!"
            )
    def _rope_scaling_validation(self):
        if self.rope_scaling is None:
            return

        if not isinstance(self.rope_scaling, dict) or len(self.rope_scaling) != 2:
            raise ValueError(
                "`rope_scaling` must be a dictionary with two fields, `type` and `factor`, "
                f"got {self.rope_scaling}"
            )
        rope_scaling_type = self.rope_scaling.get("type", None)
        rope_scaling_factor = self.rope_scaling.get("factor", None)
        if rope_scaling_type is None or rope_scaling_type not in ["linear", "dynamic"]:
            raise ValueError(f"`rope_scaling`'s type field must be one of ['linear', 'dynamic'], got {rope_scaling_type}")
        if rope_scaling_factor is None or not isinstance(rope_scaling_factor, float) or rope_scaling_factor <= 1.0:
            raise ValueError(f"`rope_scaling`'s factor field must be a float > 1, got {rope_scaling_factor}")
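A small usage sketch of the validation above: constructing the config with a rope_scaling dict runs _rope_scaling_validation at __init__ time (the values below are illustrative).

# Hedged usage sketch: rope_scaling is validated at construction time.
if __name__ == "__main__":
    config = GPTNeoXConfig(rope_scaling={"type": "linear", "factor": 2.0})  # passes validation
    try:
        GPTNeoXConfig(rope_scaling={"type": "unknown", "factor": 2.0})
    except ValueError as exc:
        print(exc)  # the type field must be one of ['linear', 'dynamic']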
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

PEGASUS_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "google/pegasus-large": "https://huggingface.co/google/pegasus-large/resolve/main/config.json",
    # See all PEGASUS models at https://huggingface.co/models?filter=pegasus
}


class PegasusConfig(PretrainedConfig):
'''simple docstring'''
    model_type = "pegasus"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}

    def __init__(self, vocab_size=50_265, max_position_embeddings=1024, encoder_layers=12, encoder_ffn_dim=4096, encoder_attention_heads=16, decoder_layers=12, decoder_ffn_dim=4096, decoder_attention_heads=16, encoder_layerdrop=0.0, decoder_layerdrop=0.0, use_cache=True, is_encoder_decoder=True, activation_function="gelu", d_model=1024, dropout=0.1, attention_dropout=0.0, activation_dropout=0.0, init_std=0.02, decoder_start_token_id=0, scale_embedding=False, pad_token_id=0, eos_token_id=1, forced_eos_token_id=1, **kwargs):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.num_hidden_layers = encoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        super().__init__(
            pad_token_id=pad_token_id, eos_token_id=eos_token_id, is_encoder_decoder=is_encoder_decoder, decoder_start_token_id=decoder_start_token_id, forced_eos_token_id=forced_eos_token_id, **kwargs,
        )
    @property
    def num_attention_heads(self) -> int:
        return self.encoder_attention_heads

    @property
    def hidden_size(self) -> int:
        return self.d_model
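A short sketch of the attribute_map defined above: generic config names resolve to the Pegasus-specific attributes, and the two properties expose the encoder values (illustrative assertions only).

# Hedged usage sketch of the attribute_map and the properties above.
if __name__ == "__main__":
    cfg = PegasusConfig()
    assert cfg.hidden_size == cfg.d_model                          # via the property/attribute_map
    assert cfg.num_attention_heads == cfg.encoder_attention_heads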
"""simple docstring"""
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_pegasus import PegasusTokenizer
else:
    PegasusTokenizer = None

logger = logging.get_logger(__name__)

SPIECE_UNDERLINE = "▁"

VOCAB_FILES_NAMES = {"vocab_file": "spiece.model", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {"google/pegasus-xsum": "https://huggingface.co/google/pegasus-xsum/resolve/main/spiece.model"},
    "tokenizer_file": {
        "google/pegasus-xsum": "https://huggingface.co/google/pegasus-xsum/resolve/main/tokenizer.json"
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "google/pegasus-xsum": 512,
}
class PegasusTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = PegasusTokenizer
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(self, vocab_file=None, tokenizer_file=None, pad_token="<pad>", eos_token="</s>", unk_token="<unk>", mask_token="<mask_2>", mask_token_sent="<mask_1>", additional_special_tokens=None, offset=103, **kwargs):
        self.offset = offset

        if additional_special_tokens is not None:
            if not isinstance(additional_special_tokens, list):
                raise TypeError(
                    f"additional_special_tokens should be of type {type(list)}, but is"
                    f" {type(additional_special_tokens)}"
                )

            additional_special_tokens_extended = (
                ([mask_token_sent] + additional_special_tokens)
                if mask_token_sent not in additional_special_tokens and mask_token_sent is not None
                else additional_special_tokens
            )
            # fill additional tokens with ..., <unk_token_102> in case not all additional tokens are already taken
            additional_special_tokens_extended += [
                f"<unk_{i}>" for i in range(len(additional_special_tokens_extended), self.offset - 1)
            ]

            if len(set(additional_special_tokens_extended)) != len(additional_special_tokens_extended):
                raise ValueError(
                    "Please make sure that the provided additional_special_tokens do not contain an incorrectly"
                    f" shifted list of <unk_x> tokens. Found {additional_special_tokens_extended}."
                )
            additional_special_tokens = additional_special_tokens_extended
        else:
            additional_special_tokens = [mask_token_sent] if mask_token_sent is not None else []
            additional_special_tokens += [f"<unk_{i}>" for i in range(2, self.offset)]

        super().__init__(
            vocab_file, tokenizer_file=tokenizer_file, pad_token=pad_token, eos_token=eos_token, unk_token=unk_token, mask_token=mask_token, mask_token_sent=mask_token_sent, offset=offset, additional_special_tokens=additional_special_tokens, **kwargs,
        )
        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True
    def _special_token_mask(self, seq):
        all_special_ids = set(self.all_special_ids)  # call it once instead of inside list comp
        all_special_ids.remove(self.unk_token_id)  # <unk> is only sometimes special

        if all_special_ids != set(range(len(self.additional_special_tokens) + 3)):
            raise ValueError(
                "There should be 3 special tokens: mask_token, pad_token, and eos_token +"
                f" {len(self.additional_special_tokens)} additional_special_tokens, but got {all_special_ids}"
            )

        return [1 if x in all_special_ids else 0 for x in seq]

    def get_special_tokens_mask(self, token_ids_0, token_ids_1=None, already_has_special_tokens=False):
        if already_has_special_tokens:
            return self._special_token_mask(token_ids_0)
        elif token_ids_1 is None:
            return self._special_token_mask(token_ids_0) + [1]
        else:
            return self._special_token_mask(token_ids_0 + token_ids_1) + [1]

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        if token_ids_1 is None:
            return token_ids_0 + [self.eos_token_id]
        # We don't expect to process pairs, but leave the pair logic for API consistency
        return token_ids_0 + token_ids_1 + [self.eos_token_id]
    def save_vocabulary(self, save_directory, filename_prefix=None):
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                "Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
                "tokenizer."
            )
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)

        return (out_vocab_file,)
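A usage sketch of the special-token layout the mask logic above assumes: build_inputs_with_special_tokens appends only </s>, and get_special_tokens_mask flags it. Downloading the public xsum checkpoint is assumed; the helper name is illustrative.

# Hedged usage sketch of the special-token handling above (requires network access).
def _show_pegasus_special_tokens():
    tok = PegasusTokenizerFast.from_pretrained("google/pegasus-xsum")
    ids = tok("A short example.").input_ids
    assert ids[-1] == tok.eos_token_id  # only </s> is appended
    mask = tok.get_special_tokens_mask(ids, already_has_special_tokens=True)
    assert mask[-1] == 1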
'''simple docstring'''
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
from ..models.auto import AutoModelForVision2Seq
from ..utils import requires_backends
from .base import PipelineTool
if TYPE_CHECKING:
from PIL import Image
class ImageCaptioningTool(PipelineTool):
    default_checkpoint = "Salesforce/blip-image-captioning-base"
    description = (
        "This is a tool that generates a description of an image. It takes an input named `image` which should be the "
        "image to caption, and returns a text that contains the description in English."
    )
    name = "image_captioner"
    model_class = AutoModelForVision2Seq

    inputs = ["image"]
    outputs = ["text"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["vision"])
        super().__init__(*args, **kwargs)

    def encode(self, image: "Image"):
        return self.pre_processor(images=image, return_tensors="pt")

    def forward(self, inputs):
        return self.model.generate(**inputs)

    def decode(self, outputs):
        return self.pre_processor.batch_decode(outputs, skip_special_tokens=True)[0].strip()
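The same captioning flow as a direct sketch against the checkpoint named above; the BLIP classes are the public transformers API, and the helper name is illustrative rather than part of the tool.

# Hedged sketch of the captioning flow the tool above wraps.
def _caption_image(image):
    import torch
    from transformers import AutoProcessor, BlipForConditionalGeneration

    processor = AutoProcessor.from_pretrained("Salesforce/blip-image-captioning-base")
    model = BlipForConditionalGeneration.from_pretrained("Salesforce/blip-image-captioning-base")
    inputs = processor(images=image, return_tensors="pt")
    with torch.no_grad():
        generated = model.generate(**inputs)
    return processor.batch_decode(generated, skip_special_tokens=True)[0].strip()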
"""simple docstring"""
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_LIST,
OpenAIGPTConfig,
OpenAIGPTDoubleHeadsModel,
OpenAIGPTForSequenceClassification,
OpenAIGPTLMHeadModel,
OpenAIGPTModel,
)
class OpenAIGPTModelTester:
    def __init__(self, parent, batch_size=13, seq_length=7, is_training=True, use_token_type_ids=True, use_labels=True, vocab_size=99, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02, num_labels=3, num_choices=4, scope=None):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
        self.pad_token_id = self.vocab_size - 1
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = OpenAIGPTConfig(
            vocab_size=self.vocab_size,
            n_embd=self.hidden_size,
            n_layer=self.num_hidden_layers,
            n_head=self.num_attention_heads,
            n_positions=self.max_position_embeddings,
            pad_token_id=self.pad_token_id,
        )
        head_mask = ids_tensor([self.num_hidden_layers, self.num_attention_heads], 2)

        return (
            config,
            input_ids,
            head_mask,
            token_type_ids,
            sequence_labels,
            token_labels,
            choice_labels,
        )
def _snake_case ( self : Tuple , lowercase_ : Any , lowercase_ : Union[str, Any] , lowercase_ : str , lowercase_ : Dict , *lowercase_ : Dict ):
snake_case_ : List[Any] = OpenAIGPTModel(config=lowercase_ )
model.to(lowercase_ )
model.eval()
snake_case_ : Any = model(lowercase_ , token_type_ids=lowercase_ , head_mask=lowercase_ )
snake_case_ : Optional[Any] = model(lowercase_ , token_type_ids=lowercase_ )
snake_case_ : Optional[Any] = model(lowercase_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
    def create_and_check_lm_head_model(self, config, input_ids, head_mask, token_type_ids, *args):
        model = OpenAIGPTLMHeadModel(config)
        model.to(torch_device)
        model.eval()

        result = model(input_ids, token_type_ids=token_type_ids, labels=input_ids)
        self.parent.assertEqual(result.loss.shape, ())
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
    def create_and_check_double_lm_head_model(self, config, input_ids, head_mask, token_type_ids, *args):
        model = OpenAIGPTDoubleHeadsModel(config)
        model.to(torch_device)
        model.eval()

        result = model(input_ids, token_type_ids=token_type_ids, labels=input_ids)
        self.parent.assertEqual(result.loss.shape, ())
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
    def create_and_check_openai_gpt_for_sequence_classification(
        self, config, input_ids, head_mask, token_type_ids, *args
    ):
        config.num_labels = self.num_labels
        model = OpenAIGPTForSequenceClassification(config)
        model.to(torch_device)
        model.eval()

        sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
        result = model(input_ids, token_type_ids=token_type_ids, labels=sequence_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            head_mask,
            token_type_ids,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs

        inputs_dict = {
            "input_ids": input_ids,
            "token_type_ids": token_type_ids,
            "head_mask": head_mask,
        }

        return config, inputs_dict
@require_torch
class OpenAIGPTModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (OpenAIGPTModel, OpenAIGPTLMHeadModel, OpenAIGPTDoubleHeadsModel, OpenAIGPTForSequenceClassification)
        if is_torch_available()
        else ()
    )
    all_generative_model_classes = (
        (OpenAIGPTLMHeadModel,) if is_torch_available() else ()
    )  # TODO (PVP): Add Double HeadsModel when generate() function is changed accordingly
    pipeline_model_mapping = (
        {
            "feature-extraction": OpenAIGPTModel,
            "text-classification": OpenAIGPTForSequenceClassification,
            "text-generation": OpenAIGPTLMHeadModel,
            "zero-shot": OpenAIGPTForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    def is_pipeline_test_to_skip(
        self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name
    ):
        if pipeline_test_casse_name == "ZeroShotClassificationPipelineTests":
            # Get `tokenizer does not have a padding token` error for both fast/slow tokenizers.
            # `OpenAIGPTConfig` was never used in pipeline tests, either because of a missing checkpoint or because a
            # tiny config could not be created.
            return True

        return False
    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)

        if return_labels:
            if model_class.__name__ == "OpenAIGPTDoubleHeadsModel":
                inputs_dict["labels"] = torch.zeros(
                    (self.model_tester.batch_size, self.model_tester.num_choices, self.model_tester.seq_length),
                    dtype=torch.long,
                    device=torch_device,
                )
                inputs_dict["input_ids"] = inputs_dict["labels"]
                inputs_dict["token_type_ids"] = inputs_dict["labels"]
                inputs_dict["mc_token_ids"] = torch.zeros(
                    (self.model_tester.batch_size, self.model_tester.num_choices),
                    dtype=torch.long,
                    device=torch_device,
                )
                inputs_dict["mc_labels"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )
        return inputs_dict
    def setUp(self):
        self.model_tester = OpenAIGPTModelTester(self)
        self.config_tester = ConfigTester(self, config_class=OpenAIGPTConfig, n_embd=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_openai_gpt_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_openai_gpt_model(*config_and_inputs)

    def test_openai_gpt_lm_head_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_lm_head_model(*config_and_inputs)

    def test_openai_gpt_double_lm_head_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_double_lm_head_model(*config_and_inputs)

    def test_openai_gpt_classification_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_openai_gpt_for_sequence_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = OpenAIGPTModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
@require_torch
class OPENAIGPTModelLanguageGenerationTest(unittest.TestCase):
    @slow
    def test_lm_generate_openai_gpt(self):
        model = OpenAIGPTLMHeadModel.from_pretrained("openai-gpt")
        model.to(torch_device)
        input_ids = torch.tensor([[481, 4735, 544]], dtype=torch.long, device=torch_device)  # the president is
        expected_output_ids = [
481,
4735,
544,
246,
963,
870,
762,
239,
244,
40477,
244,
249,
719,
881,
487,
544,
240,
244,
603,
481,
] # the president is a very good man. " \n " i\'m sure he is, " said the
        output_ids = model.generate(input_ids, do_sample=False)
        self.assertListEqual(output_ids[0].tolist(), expected_output_ids)
| 264 | 0 |
deps = {
"Pillow": "Pillow<10.0.0",
"accelerate": "accelerate>=0.20.3",
"av": "av==9.2.0",
"beautifulsoup4": "beautifulsoup4",
"black": "black~=23.1",
"codecarbon": "codecarbon==1.2.0",
"cookiecutter": "cookiecutter==1.7.3",
"dataclasses": "dataclasses",
"datasets": "datasets!=2.5.0",
"decord": "decord==0.6.0",
"deepspeed": "deepspeed>=0.9.3",
"diffusers": "diffusers",
"dill": "dill<0.3.5",
"evaluate": "evaluate>=0.2.0",
"fairscale": "fairscale>0.3",
"faiss-cpu": "faiss-cpu",
"fastapi": "fastapi",
"filelock": "filelock",
"flax": "flax>=0.4.1,<=0.7.0",
"ftfy": "ftfy",
"fugashi": "fugashi>=1.0",
"GitPython": "GitPython<3.1.19",
"hf-doc-builder": "hf-doc-builder>=0.3.0",
"huggingface-hub": "huggingface-hub>=0.14.1,<1.0",
"importlib_metadata": "importlib_metadata",
"ipadic": "ipadic>=1.0.0,<2.0",
"isort": "isort>=5.5.4",
"jax": "jax>=0.2.8,!=0.3.2,<=0.4.13",
"jaxlib": "jaxlib>=0.1.65,<=0.4.13",
"jieba": "jieba",
"kenlm": "kenlm",
"keras-nlp": "keras-nlp>=0.3.1",
"librosa": "librosa",
"nltk": "nltk",
"natten": "natten>=0.14.6",
"numpy": "numpy>=1.17",
"onnxconverter-common": "onnxconverter-common",
"onnxruntime-tools": "onnxruntime-tools>=1.4.2",
"onnxruntime": "onnxruntime>=1.4.0",
"opencv-python": "opencv-python",
"optuna": "optuna",
"optax": "optax>=0.0.8,<=0.1.4",
"packaging": "packaging>=20.0",
"parameterized": "parameterized",
"phonemizer": "phonemizer",
"protobuf": "protobuf",
"psutil": "psutil",
"pyyaml": "pyyaml>=5.1",
"pydantic": "pydantic<2",
"pytest": "pytest>=7.2.0",
"pytest-timeout": "pytest-timeout",
"pytest-xdist": "pytest-xdist",
"python": "python>=3.8.0",
"ray[tune]": "ray[tune]",
"regex": "regex!=2019.12.17",
"requests": "requests",
"rhoknp": "rhoknp>=1.1.0,<1.3.1",
"rjieba": "rjieba",
"rouge-score": "rouge-score!=0.0.7,!=0.0.8,!=0.1,!=0.1.1",
"ruff": "ruff>=0.0.241,<=0.0.259",
"sacrebleu": "sacrebleu>=1.4.12,<2.0.0",
"sacremoses": "sacremoses",
"safetensors": "safetensors>=0.3.1",
"sagemaker": "sagemaker>=2.31.0",
"scikit-learn": "scikit-learn",
"sentencepiece": "sentencepiece>=0.1.91,!=0.1.92",
"sigopt": "sigopt",
"starlette": "starlette",
"sudachipy": "sudachipy>=0.6.6",
"sudachidict_core": "sudachidict_core>=20220729",
"tensorflow-cpu": "tensorflow-cpu>=2.6,<2.14",
"tensorflow": "tensorflow>=2.6,<2.14",
"tensorflow-text": "tensorflow-text<2.14",
"tf2onnx": "tf2onnx",
"timeout-decorator": "timeout-decorator",
"timm": "timm",
"tokenizers": "tokenizers>=0.11.1,!=0.11.3,<0.14",
"torch": "torch>=1.9,!=1.12.0",
"torchaudio": "torchaudio",
"torchvision": "torchvision",
"pyctcdecode": "pyctcdecode>=0.4.0",
"tqdm": "tqdm>=4.27",
"unidic": "unidic>=1.0.2",
"unidic_lite": "unidic_lite>=1.0.7",
"urllib3": "urllib3<2.0.0",
"uvicorn": "uvicorn",
}
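# Editor's note: a minimal sketch (not part of the original file) showing how a
# pin table like this is typically consumed, e.g. to build setup() requirements.
# The helper name `deps_list` mirrors the one used in transformers' setup.py and
# is illustrative only.
def deps_list(*pkgs: str) -> list:
    """Look up the pinned requirement string for each package name."""
    return [deps[pkg] for pkg in pkgs]

# Example: deps_list("torch", "numpy") -> ["torch>=1.9,!=1.12.0", "numpy>=1.17"]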
| 90 |
"""simple docstring"""
from typing import Dict, List, Optional, Tuple, Union
import torch
from ...models import AutoencoderKL, TransformeraDModel
from ...schedulers import KarrasDiffusionSchedulers
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
class _UpperCAmelCase ( lowerCAmelCase__):
def __init__( self : Any , lowercase_ : TransformeraDModel , lowercase_ : AutoencoderKL , lowercase_ : KarrasDiffusionSchedulers , lowercase_ : Optional[Dict[int, str]] = None , ):
super().__init__()
self.register_modules(transformer=lowercase_ , vae=lowercase_ , scheduler=lowercase_ )
        # create an imagenet -> id dictionary for easier use
        self.labels = {}
        if id2label is not None:
            for key, value in id2label.items():
                for label in value.split(","):
                    self.labels[label.lstrip().rstrip()] = int(key)
        self.labels = dict(sorted(self.labels.items()))

    def get_label_ids(self, label: Union[str, List[str]]) -> List[int]:
        if not isinstance(label, list):
            label = list(label)
        for l in label:
            if l not in self.labels:
                raise ValueError(
                    f"{l} does not exist. Please make sure to select one of the following labels: \n {self.labels}."
                )
        return [self.labels[l] for l in label]
    @torch.no_grad()
    def __call__(
        self,
        class_labels: List[int],
        guidance_scale: float = 4.0,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        num_inference_steps: int = 50,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
    ):
        batch_size = len(class_labels)
        latent_size = self.transformer.config.sample_size
        latent_channels = self.transformer.config.in_channels

        latents = randn_tensor(
            shape=(batch_size, latent_channels, latent_size, latent_size),
            generator=generator,
            device=self.device,
            dtype=self.transformer.dtype,
        )
        latent_model_input = torch.cat([latents] * 2) if guidance_scale > 1 else latents

        class_labels = torch.tensor(class_labels, device=self.device).reshape(-1)
        class_null = torch.tensor([1000] * batch_size, device=self.device)
        class_labels_input = torch.cat([class_labels, class_null], 0) if guidance_scale > 1 else class_labels

        # set step values
        self.scheduler.set_timesteps(num_inference_steps)

        for t in self.progress_bar(self.scheduler.timesteps):
            if guidance_scale > 1:
                half = latent_model_input[: len(latent_model_input) // 2]
                latent_model_input = torch.cat([half, half], dim=0)
            latent_model_input = self.scheduler.scale_model_input(latent_model_input, t)

            timesteps = t
            if not torch.is_tensor(timesteps):
                # TODO: this requires sync between CPU and GPU. So try to pass timesteps as tensors if you can
                # This would be a good case for the `match` statement (Python 3.10+)
                is_mps = latent_model_input.device.type == "mps"
                if isinstance(timesteps, float):
                    dtype = torch.float32 if is_mps else torch.float64
                else:
                    dtype = torch.int32 if is_mps else torch.int64
                timesteps = torch.tensor([timesteps], dtype=dtype, device=latent_model_input.device)
            elif len(timesteps.shape) == 0:
                timesteps = timesteps[None].to(latent_model_input.device)
            # broadcast to batch dimension in a way that's compatible with ONNX/Core ML
            timesteps = timesteps.expand(latent_model_input.shape[0])
            # predict noise model_output
            noise_pred = self.transformer(latent_model_input, timestep=timesteps, class_labels=class_labels_input).sample

            # perform guidance
            if guidance_scale > 1:
                eps, rest = noise_pred[:, :latent_channels], noise_pred[:, latent_channels:]
                cond_eps, uncond_eps = torch.split(eps, len(eps) // 2, dim=0)

                half_eps = uncond_eps + guidance_scale * (cond_eps - uncond_eps)
                eps = torch.cat([half_eps, half_eps], dim=0)

                noise_pred = torch.cat([eps, rest], dim=1)

            # learned sigma
            if self.transformer.config.out_channels // 2 == latent_channels:
                model_output, _ = torch.split(noise_pred, latent_channels, dim=1)
            else:
                model_output = noise_pred

            # compute previous image: x_t -> x_t-1
            latent_model_input = self.scheduler.step(model_output, t, latent_model_input).prev_sample

        if guidance_scale > 1:
            latents, _ = latent_model_input.chunk(2, dim=0)
        else:
            latents = latent_model_input

        latents = 1 / self.vae.config.scaling_factor * latents
        samples = self.vae.decode(latents).sample

        samples = (samples / 2 + 0.5).clamp(0, 1)

        # we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
        samples = samples.cpu().permute(0, 2, 3, 1).float().numpy()

        if output_type == "pil":
            samples = self.numpy_to_pil(samples)

        if not return_dict:
            return (samples,)

        return ImagePipelineOutput(images=samples)
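# Editor's note: a hedged usage sketch for the pipeline above (editor-added).
# The checkpoint id "facebook/DiT-XL-2-256" is the public DiT checkpoint at the
# time of writing; substitute your own if it has moved.
#
#     import torch
#     from diffusers import DiTPipeline
#
#     pipe = DiTPipeline.from_pretrained("facebook/DiT-XL-2-256", torch_dtype=torch.float16)
#     pipe = pipe.to("cuda")
#     class_ids = pipe.get_label_ids(["white shark", "umbrella"])
#     image = pipe(class_labels=class_ids, num_inference_steps=25).images[0]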
| 264 | 0 |
"""simple docstring"""
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
StableDiffusionSAGPipeline,
    UNet2DConditionModel,
)
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class StableDiffusionSAGPipelineFastTests(PipelineLatentTesterMixin, PipelineTesterMixin, unittest.TestCase):
    pipeline_class = StableDiffusionSAGPipeline
    params = TEXT_TO_IMAGE_PARAMS
    batch_params = TEXT_TO_IMAGE_BATCH_PARAMS
    image_params = TEXT_TO_IMAGE_IMAGE_PARAMS
    image_latents_params = TEXT_TO_IMAGE_IMAGE_PARAMS
    test_cpu_offload = False
    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            block_out_channels=(32, 64), layers_per_block=2, sample_size=32, in_channels=4, out_channels=4, down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"), cross_attention_dim=32,
        )
        scheduler = DDIMScheduler(
            beta_start=0.00085, beta_end=0.012, beta_schedule="scaled_linear", clip_sample=False, set_alpha_to_one=False,
        )
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64], in_channels=3, out_channels=3, down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"], up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"], latent_channels=4,
        )
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000,
        )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        components = {
            "unet": unet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "safety_checker": None,
            "feature_extractor": None,
        }
        return components
    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": ".",
            "generator": generator,
            "num_inference_steps": 2,
            "guidance_scale": 1.0,
            "sag_scale": 1.0,
            "output_type": "numpy",
        }
        return inputs
    def test_inference_batch_single_identical(self):
        super().test_inference_batch_single_identical(expected_max_diff=3e-3)
@slow
@require_torch_gpu
class StableDiffusionPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_stable_diffusion_1(self):
        sag_pipe = StableDiffusionSAGPipeline.from_pretrained("CompVis/stable-diffusion-v1-4")
        sag_pipe = sag_pipe.to(torch_device)
        sag_pipe.set_progress_bar_config(disable=None)

        prompt = "."
        generator = torch.manual_seed(0)
        output = sag_pipe(
            [prompt], generator=generator, guidance_scale=7.5, sag_scale=1.0, num_inference_steps=20, output_type="np"
        )

        image = output.images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.1568, 0.1738, 0.1695, 0.1693, 0.1507, 0.1705, 0.1547, 0.1751, 0.1949])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 5e-2
    def test_stable_diffusion_2(self):
        sag_pipe = StableDiffusionSAGPipeline.from_pretrained("stabilityai/stable-diffusion-2-1-base")
        sag_pipe = sag_pipe.to(torch_device)
        sag_pipe.set_progress_bar_config(disable=None)

        prompt = "."
        generator = torch.manual_seed(0)
        output = sag_pipe(
            [prompt], generator=generator, guidance_scale=7.5, sag_scale=1.0, num_inference_steps=20, output_type="np"
        )

        image = output.images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.3459, 0.2876, 0.2537, 0.3002, 0.2671, 0.2160, 0.3026, 0.2262, 0.2371])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 5e-2
    def test_stable_diffusion_2_non_square(self):
        sag_pipe = StableDiffusionSAGPipeline.from_pretrained("stabilityai/stable-diffusion-2-1-base")
        sag_pipe = sag_pipe.to(torch_device)
        sag_pipe.set_progress_bar_config(disable=None)

        prompt = "."
        generator = torch.manual_seed(0)
        output = sag_pipe(
            [prompt], width=768, height=512, generator=generator, guidance_scale=7.5, sag_scale=1.0, num_inference_steps=20, output_type="np",
        )

        image = output.images

        assert image.shape == (1, 512, 768, 3)
| 91 |
"""simple docstring"""
import copy
import os
import cv2
import numpy as np
from matplotlib import pyplot as plt
class ConstantStretch:
    def __init__(self):
        self.img = ""
        self.original_image = ""
        self.last_list = []
        self.rem = 0
        self.L = 256
        self.sk = 0
        self.k = 0
        self.number_of_rows = 0
        self.number_of_cols = 0

    def stretch(self, input_image):
        self.img = cv2.imread(input_image, 0)
        self.original_image = copy.deepcopy(self.img)
        x, _, _ = plt.hist(self.img.ravel(), 256, [0, 256], label="x")
        self.k = np.sum(x)
        for i in range(len(x)):
            prk = x[i] / self.k
            self.sk += prk
            last = (self.L - 1) * self.sk
            if self.rem != 0:
                self.rem = int(last % last)
            self.rem = int(last + 1 if self.rem >= 0.5 else last)
            self.last_list.append(self.rem)
            self.number_of_rows = int(np.ma.count(self.img) / self.img[1].size)
            self.number_of_cols = self.img[1].size
        for i in range(self.number_of_cols):
            for j in range(self.number_of_rows):
                num = self.img[j][i]
                if num != self.last_list[num]:
                    self.img[j][i] = self.last_list[num]
        cv2.imwrite("output_data/output.jpg", self.img)

    def plot_histogram(self):
        plt.hist(self.img.ravel(), 256, [0, 256])

    def show_image(self):
        cv2.imshow("Output-Image", self.img)
        cv2.imshow("Input-Image", self.original_image)
        cv2.waitKey(5000)
        cv2.destroyAllWindows()
if __name__ == "__main__":
    file_path = os.path.join(os.path.basename(__file__), "image_data/input.jpg")
    stretcher = ConstantStretch()
stretcher.stretch(file_path)
stretcher.plot_histogram()
stretcher.show_image()
| 264 | 0 |
def multiplicative_persistence(num: int) -> int:
    """
    Return the multiplicative persistence of a number.
    >>> multiplicative_persistence(217)
    2
    """
    if not isinstance(num, int):
        raise ValueError("multiplicative_persistence() only accepts integral values")
    if num < 0:
        raise ValueError("multiplicative_persistence() does not accept negative values")

    steps = 0
    num_string = str(num)

    while len(num_string) != 1:
        numbers = [int(i) for i in num_string]

        total = 1
        for i in range(0, len(numbers)):
            total *= numbers[i]

        num_string = str(total)

        steps += 1
    return steps


def additive_persistence(num: int) -> int:
    """
    Return the additive persistence of a number.
    >>> additive_persistence(199)
    3
    """
    if not isinstance(num, int):
        raise ValueError("additive_persistence() only accepts integral values")
    if num < 0:
        raise ValueError("additive_persistence() does not accept negative values")

    steps = 0
    num_string = str(num)

    while len(num_string) != 1:
        numbers = [int(i) for i in num_string]

        total = 0
        for i in range(0, len(numbers)):
            total += numbers[i]

        num_string = str(total)

        steps += 1
    return steps
if __name__ == "__main__":
import doctest
doctest.testmod()
| 92 |
"""simple docstring"""
import shutil
import tempfile
import unittest
from unittest.mock import patch
from transformers import (
DefaultFlowCallback,
IntervalStrategy,
PrinterCallback,
ProgressCallback,
Trainer,
TrainerCallback,
TrainingArguments,
is_torch_available,
)
from transformers.testing_utils import require_torch
if is_torch_available():
from transformers.trainer import DEFAULT_CALLBACKS
from .test_trainer import RegressionDataset, RegressionModelConfig, RegressionPreTrainedModel
class MyTestTrainerCallback(TrainerCallback):
    "A callback that registers the events that go through."

    def __init__(self):
        self.events = []

    def on_init_end(self, args, state, control, **kwargs):
        self.events.append("on_init_end")

    def on_train_begin(self, args, state, control, **kwargs):
        self.events.append("on_train_begin")

    def on_train_end(self, args, state, control, **kwargs):
        self.events.append("on_train_end")

    def on_epoch_begin(self, args, state, control, **kwargs):
        self.events.append("on_epoch_begin")

    def on_epoch_end(self, args, state, control, **kwargs):
        self.events.append("on_epoch_end")

    def on_step_begin(self, args, state, control, **kwargs):
        self.events.append("on_step_begin")

    def on_step_end(self, args, state, control, **kwargs):
        self.events.append("on_step_end")

    def on_evaluate(self, args, state, control, **kwargs):
        self.events.append("on_evaluate")

    def on_predict(self, args, state, control, **kwargs):
        self.events.append("on_predict")

    def on_save(self, args, state, control, **kwargs):
        self.events.append("on_save")

    def on_log(self, args, state, control, **kwargs):
        self.events.append("on_log")

    def on_prediction_step(self, args, state, control, **kwargs):
        self.events.append("on_prediction_step")
@require_torch
class TrainerCallbackTest(unittest.TestCase):
    def setUp(self):
        self.output_dir = tempfile.mkdtemp()

    def tearDown(self):
        shutil.rmtree(self.output_dir)

    def get_trainer(self, a=0, b=0, train_len=64, eval_len=64, callbacks=None, disable_tqdm=False, **kwargs):
        # disable_tqdm in TrainingArguments has a flaky default since it depends on the level of logging. We make sure
        # its set to False since the tests later on depend on its value.
        train_dataset = RegressionDataset(length=train_len)
        eval_dataset = RegressionDataset(length=eval_len)
        config = RegressionModelConfig(a=a, b=b)
        model = RegressionPreTrainedModel(config)
        args = TrainingArguments(self.output_dir, disable_tqdm=disable_tqdm, report_to=[], **kwargs)
        return Trainer(
            model, args, train_dataset=train_dataset, eval_dataset=eval_dataset, callbacks=callbacks
        )
    def check_callbacks_equality(self, cbs1, cbs2):
        self.assertEqual(len(cbs1), len(cbs2))

        # Order doesn't matter
        cbs1 = sorted(cbs1, key=lambda cb: cb.__name__ if isinstance(cb, type) else cb.__class__.__name__)
        cbs2 = sorted(cbs2, key=lambda cb: cb.__name__ if isinstance(cb, type) else cb.__class__.__name__)

        for cb1, cb2 in zip(cbs1, cbs2):
            if isinstance(cb1, type) and isinstance(cb2, type):
                self.assertEqual(cb1, cb2)
            elif isinstance(cb1, type) and not isinstance(cb2, type):
                self.assertEqual(cb1, cb2.__class__)
            elif not isinstance(cb1, type) and isinstance(cb2, type):
                self.assertEqual(cb1.__class__, cb2)
            else:
                self.assertEqual(cb1, cb2)
    def get_expected_events(self, trainer):
        expected_events = ["on_init_end", "on_train_begin"]
        step = 0
        train_dl_len = len(trainer.get_eval_dataloader())
        evaluation_events = ["on_prediction_step"] * len(trainer.get_eval_dataloader()) + ["on_log", "on_evaluate"]
        for _ in range(trainer.state.num_train_epochs):
            expected_events.append("on_epoch_begin")
            for _ in range(train_dl_len):
                step += 1
                expected_events += ["on_step_begin", "on_step_end"]
                if step % trainer.args.logging_steps == 0:
                    expected_events.append("on_log")
                if trainer.args.evaluation_strategy == IntervalStrategy.STEPS and step % trainer.args.eval_steps == 0:
                    expected_events += evaluation_events.copy()
                if step % trainer.args.save_steps == 0:
                    expected_events.append("on_save")
            expected_events.append("on_epoch_end")
            if trainer.args.evaluation_strategy == IntervalStrategy.EPOCH:
                expected_events += evaluation_events.copy()
        expected_events += ["on_log", "on_train_end"]
        return expected_events
    def test_init_callback(self):
        trainer = self.get_trainer()
        expected_callbacks = DEFAULT_CALLBACKS.copy() + [ProgressCallback]
        self.check_callbacks_equality(trainer.callback_handler.callbacks, expected_callbacks)

        # Callbacks passed at init are added to the default callbacks
        trainer = self.get_trainer(callbacks=[MyTestTrainerCallback])
        expected_callbacks.append(MyTestTrainerCallback)
        self.check_callbacks_equality(trainer.callback_handler.callbacks, expected_callbacks)

        # TrainingArguments.disable_tqdm controls if use ProgressCallback or PrinterCallback
        trainer = self.get_trainer(disable_tqdm=True)
        expected_callbacks = DEFAULT_CALLBACKS.copy() + [PrinterCallback]
        self.check_callbacks_equality(trainer.callback_handler.callbacks, expected_callbacks)
    def test_add_remove_callback(self):
        expected_callbacks = DEFAULT_CALLBACKS.copy() + [ProgressCallback]
        trainer = self.get_trainer()

        # We can add, pop, or remove by class name
        trainer.remove_callback(DefaultFlowCallback)
        expected_callbacks.remove(DefaultFlowCallback)
        self.check_callbacks_equality(trainer.callback_handler.callbacks, expected_callbacks)

        trainer = self.get_trainer()
        cb = trainer.pop_callback(DefaultFlowCallback)
        self.assertEqual(cb.__class__, DefaultFlowCallback)
        self.check_callbacks_equality(trainer.callback_handler.callbacks, expected_callbacks)

        trainer.add_callback(DefaultFlowCallback)
        expected_callbacks.insert(0, DefaultFlowCallback)
        self.check_callbacks_equality(trainer.callback_handler.callbacks, expected_callbacks)

        # We can also add, pop, or remove by instance
        trainer = self.get_trainer()
        cb = trainer.callback_handler.callbacks[0]
        trainer.remove_callback(cb)
        expected_callbacks.remove(cb)
        self.check_callbacks_equality(trainer.callback_handler.callbacks, expected_callbacks)

        trainer = self.get_trainer()
        cb1 = trainer.callback_handler.callbacks[0]
        cb2 = trainer.pop_callback(cb1)
        self.assertEqual(cb1, cb2)
        self.check_callbacks_equality(trainer.callback_handler.callbacks, expected_callbacks)

        trainer.add_callback(cb1)
        expected_callbacks.insert(0, cb1)
        self.check_callbacks_equality(trainer.callback_handler.callbacks, expected_callbacks)
    def test_event_flow(self):
        import warnings

        # XXX: for now ignore scatter_gather warnings in this test since it's not relevant to what's being tested
        warnings.simplefilter(action="ignore", category=UserWarning)

        trainer = self.get_trainer(callbacks=[MyTestTrainerCallback])
        trainer.train()
        events = trainer.callback_handler.callbacks[-2].events
        self.assertEqual(events, self.get_expected_events(trainer))

        # Independent log/save/eval
        trainer = self.get_trainer(callbacks=[MyTestTrainerCallback], logging_steps=5)
        trainer.train()
        events = trainer.callback_handler.callbacks[-2].events
        self.assertEqual(events, self.get_expected_events(trainer))

        trainer = self.get_trainer(callbacks=[MyTestTrainerCallback], save_steps=5)
        trainer.train()
        events = trainer.callback_handler.callbacks[-2].events
        self.assertEqual(events, self.get_expected_events(trainer))

        trainer = self.get_trainer(callbacks=[MyTestTrainerCallback], eval_steps=5, evaluation_strategy="steps")
        trainer.train()
        events = trainer.callback_handler.callbacks[-2].events
        self.assertEqual(events, self.get_expected_events(trainer))

        trainer = self.get_trainer(callbacks=[MyTestTrainerCallback], evaluation_strategy="epoch")
        trainer.train()
        events = trainer.callback_handler.callbacks[-2].events
        self.assertEqual(events, self.get_expected_events(trainer))

        # A bit of everything
        trainer = self.get_trainer(
            callbacks=[MyTestTrainerCallback], logging_steps=3, save_steps=10, eval_steps=5, evaluation_strategy="steps",
        )
        trainer.train()
        events = trainer.callback_handler.callbacks[-2].events
        self.assertEqual(events, self.get_expected_events(trainer))

        # warning should be emitted for duplicated callbacks
        with patch("transformers.trainer_callback.logger.warning") as warn_mock:
            trainer = self.get_trainer(
                callbacks=[MyTestTrainerCallback, MyTestTrainerCallback],
            )
            assert str(MyTestTrainerCallback) in warn_mock.call_args[0][0]
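# Editor's note: outside of these tests, a callback like MyTestTrainerCallback is
# wired into a real training run the same way; a minimal sketch (the model and
# dataset names are placeholders, not from the original file):
#
#     trainer = Trainer(model, args, train_dataset=ds, callbacks=[MyTestTrainerCallback()])
#     trainer.train()
#     print(trainer.callback_handler.callbacks[-2].events)  # ordered event log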
| 264 | 0 |
'''simple docstring'''
import unittest
import numpy as np
from transformers import MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING, TF_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING
from transformers.pipelines import AudioClassificationPipeline, pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_torch,
require_torchaudio,
slow,
)
from .test_pipelines_common import ANY
@is_pipeline_test
class AudioClassificationPipelineTests(unittest.TestCase):
    model_mapping = MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING
    tf_model_mapping = TF_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING

    def get_test_pipeline(self, model, tokenizer, processor):
        audio_classifier = AudioClassificationPipeline(model=model, feature_extractor=processor)

        # test with a raw waveform
        audio = np.zeros((34000,))
        audio2 = np.zeros((14000,))
        return audio_classifier, [audio2, audio]
    def run_pipeline_test(self, audio_classifier, examples):
        audio2, audio = examples
        output = audio_classifier(audio)
        # by default a model is initialized with num_labels=2
        self.assertEqual(
            output,
            [
                {"score": ANY(float), "label": ANY(str)},
                {"score": ANY(float), "label": ANY(str)},
            ],
        )
        output = audio_classifier(audio, top_k=1)
        self.assertEqual(
            output,
            [
                {"score": ANY(float), "label": ANY(str)},
            ],
        )

        self.run_torchaudio(audio_classifier)
    @require_torchaudio
    def run_torchaudio(self, audio_classifier):
        import datasets

        # test with a local file
        dataset = datasets.load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation")
        audio = dataset[0]["audio"]["array"]
        output = audio_classifier(audio)
        self.assertEqual(
            output,
            [
                {"score": ANY(float), "label": ANY(str)},
                {"score": ANY(float), "label": ANY(str)},
            ],
        )
    @require_torch
    def test_small_model_pt(self):
        model = "anton-l/wav2vec2-random-tiny-classifier"

        audio_classifier = pipeline("audio-classification", model=model)

        audio = np.ones((8000,))
        output = audio_classifier(audio, top_k=4)

        EXPECTED_OUTPUT = [
            {"score": 0.0842, "label": "no"},
            {"score": 0.0838, "label": "up"},
            {"score": 0.0837, "label": "go"},
            {"score": 0.0834, "label": "right"},
        ]
        EXPECTED_OUTPUT_PT_2 = [
            {"score": 0.0845, "label": "stop"},
            {"score": 0.0844, "label": "on"},
            {"score": 0.0841, "label": "right"},
            {"score": 0.0834, "label": "left"},
        ]
        self.assertIn(nested_simplify(output, decimals=4), [EXPECTED_OUTPUT, EXPECTED_OUTPUT_PT_2])

        audio_dict = {"array": np.ones((8000,)), "sampling_rate": audio_classifier.feature_extractor.sampling_rate}
        output = audio_classifier(audio_dict, top_k=4)
        self.assertIn(nested_simplify(output, decimals=4), [EXPECTED_OUTPUT, EXPECTED_OUTPUT_PT_2])
    @require_torch
    @slow
    def test_large_model_pt(self):
        import datasets

        model = "superb/wav2vec2-base-superb-ks"

        audio_classifier = pipeline("audio-classification", model=model)
        dataset = datasets.load_dataset("anton-l/superb_dummy", "ks", split="test")

        audio = np.array(dataset[3]["speech"], dtype=np.float32)
        output = audio_classifier(audio, top_k=4)
        self.assertEqual(
            nested_simplify(output, decimals=3),
            [
                {"score": 0.981, "label": "go"},
                {"score": 0.007, "label": "up"},
                {"score": 0.006, "label": "_unknown_"},
                {"score": 0.001, "label": "down"},
            ],
        )
    @require_tf
    @unittest.skip("Audio classification is not implemented for TF")
    def test_small_model_tf(self):
        pass
| 93 |
"""simple docstring"""
import numpy as np
def tangent_hyperbolic(vector: np.ndarray) -> np.ndarray:
    """Implements the tanh function as (2 / (1 + e^(-2x))) - 1."""
    return (2 / (1 + np.exp(-2 * vector))) - 1
if __name__ == "__main__":
import doctest
doctest.testmod()
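# Editor's note: quick sanity check (editor-added) — the closed form above
# matches numpy's built-in tanh to floating-point precision.
if __name__ == "__main__":
    _v = np.array([-1.0, 0.0, 1.0])
    assert np.allclose(tangent_hyperbolic(_v), np.tanh(_v))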
| 264 | 0 |
import mpmath # for roots of unity
import numpy as np
class FFT:
    def __init__(self, poly_a=None, poly_b=None):
        # Input as list
        self.polyA = list(poly_a or [0])[:]
        self.polyB = list(poly_b or [0])[:]

        # Remove leading zero coefficients
        while self.polyA[-1] == 0:
            self.polyA.pop()
        self.len_A = len(self.polyA)

        while self.polyB[-1] == 0:
            self.polyB.pop()
        self.len_B = len(self.polyB)

        # Add 0 to make lengths equal a power of 2
        self.c_max_length = int(
            2 ** np.ceil(np.log2(len(self.polyA) + len(self.polyB) - 1))
        )

        while len(self.polyA) < self.c_max_length:
            self.polyA.append(0)
        while len(self.polyB) < self.c_max_length:
            self.polyB.append(0)
        # A complex root used for the fourier transform
        self.root = complex(mpmath.root(x=1, n=self.c_max_length, k=1))

        # The product
        self.product = self.__multiply()
    # Discrete fourier transform of A and B
    def __dft(self, which):
        dft = [[x] for x in self.polyA] if which == "A" else [[x] for x in self.polyB]
        # Corner case
        if len(dft) <= 1:
            return dft[0]
        #
        next_ncol = self.c_max_length // 2
        while next_ncol > 0:
            new_dft = [[] for i in range(next_ncol)]
            root = self.root**next_ncol

            # First half of next step
            current_root = 1
            for j in range(self.c_max_length // (next_ncol * 2)):
                for i in range(next_ncol):
                    new_dft[i].append(dft[i][j] + current_root * dft[i + next_ncol][j])
                current_root *= root
            # Second half of next step
            current_root = 1
            for j in range(self.c_max_length // (next_ncol * 2)):
                for i in range(next_ncol):
                    new_dft[i].append(dft[i][j] - current_root * dft[i + next_ncol][j])
                current_root *= root
            # Update
            dft = new_dft
            next_ncol = next_ncol // 2
        return dft[0]
    # Multiply the two polynomials via pointwise product in the frequency domain
    def __multiply(self):
        dft_a = self.__dft("A")
        dft_b = self.__dft("B")
        inverce_c = [[dft_a[i] * dft_b[i] for i in range(self.c_max_length)]]
        del dft_a
        del dft_b

        # Corner Case
        if len(inverce_c[0]) <= 1:
            return inverce_c[0]
        # Inverse DFT
        next_ncol = 2
        while next_ncol <= self.c_max_length:
            new_inverse_c = [[] for i in range(next_ncol)]
            root = self.root ** (next_ncol // 2)
            current_root = 1
            # First half of next step
            for j in range(self.c_max_length // next_ncol):
                for i in range(next_ncol // 2):
                    # Even positions
                    new_inverse_c[i].append(
                        (inverce_c[i][j] + inverce_c[i][j + self.c_max_length // next_ncol]) / 2
                    )
                    # Odd positions
                    new_inverse_c[i + next_ncol // 2].append(
                        (inverce_c[i][j] - inverce_c[i][j + self.c_max_length // next_ncol])
                        / (2 * current_root)
                    )
                current_root *= root
            # Update
            inverce_c = new_inverse_c
            next_ncol *= 2
        # Unpack
        inverce_c = [round(x[0].real, 8) + round(x[0].imag, 8) * 1j for x in inverce_c]

        # Remove leading 0's
        while inverce_c[-1] == 0:
            inverce_c.pop()
        return inverce_c

    # Overwrite __str__ for print(); shows A, B and A*B
    def __str__(self):
        a = "A = " + " + ".join(f"{coef}*x^{i}" for i, coef in enumerate(self.polyA[: self.len_A]))
        b = "B = " + " + ".join(f"{coef}*x^{i}" for i, coef in enumerate(self.polyB[: self.len_B]))
        c = "A*B = " + " + ".join(f"{coef}*x^{i}" for i, coef in enumerate(self.product))
        return f"{a}\n{b}\n{c}"
# Unit tests
if __name__ == "__main__":
import doctest
doctest.testmod()
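# Editor's note: a small worked example (editor-added). Multiplying
# A(x) = 1 + 2x + 3x^2 by B(x) = 3 + 4x should give 3 + 10x + 17x^2 + 12x^3.
#
#     fft = FFT(poly_a=[1, 2, 3], poly_b=[3, 4])
#     print(fft.product)  # coefficients of A*B, lowest degree first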
| 94 |
"""simple docstring"""
import numpy as np
import torch
from torch.utils.data import Dataset
from utils import logger
class LmSeqsDataset(Dataset):
    """Custom Dataset wrapping language modeling sequences."""

    def __init__(self, params, data):
        self.params = params

        self.token_ids = np.array(data)
        self.lengths = np.array([len(t) for t in data])

        self.check()
        self.remove_long_sequences()
        self.remove_empty_sequences()
        self.remove_unknown_sequences()
        self.check()
        self.print_statistics()

    def __getitem__(self, index):
        return (self.token_ids[index], self.lengths[index])

    def __len__(self):
        return len(self.lengths)

    def check(self):
        """Some sanity checks."""
        assert len(self.token_ids) == len(self.lengths)
        assert all(self.lengths[i] == len(self.token_ids[i]) for i in range(len(self.lengths)))
    def remove_long_sequences(self):
        """Sequences that are too long are split by chunks of max_model_input_size."""
        max_len = self.params.max_model_input_size
        indices = self.lengths > max_len
        logger.info(f"Splitting {sum(indices)} too long sequences.")

        def divide_chunks(l, n):
            return [l[i : i + n] for i in range(0, len(l), n)]

        new_tok_ids = []
        new_lengths = []
        if self.params.mlm:
            cls_id, sep_id = self.params.special_tok_ids["cls_token"], self.params.special_tok_ids["sep_token"]
        else:
            cls_id, sep_id = self.params.special_tok_ids["bos_token"], self.params.special_tok_ids["eos_token"]

        for seq_, len_ in zip(self.token_ids, self.lengths):
            assert (seq_[0] == cls_id) and (seq_[-1] == sep_id), seq_
            if len_ <= max_len:
                new_tok_ids.append(seq_)
                new_lengths.append(len_)
            else:
                sub_seqs = []
                for sub_s in divide_chunks(seq_, max_len - 2):
                    if sub_s[0] != cls_id:
                        sub_s = np.insert(sub_s, 0, cls_id)
                    if sub_s[-1] != sep_id:
                        sub_s = np.insert(sub_s, len(sub_s), sep_id)
                    assert len(sub_s) <= max_len
                    assert (sub_s[0] == cls_id) and (sub_s[-1] == sep_id), sub_s
                    sub_seqs.append(sub_s)

                new_tok_ids.extend(sub_seqs)
                new_lengths.extend([len(l) for l in sub_seqs])

        self.token_ids = np.array(new_tok_ids)
        self.lengths = np.array(new_lengths)
    def remove_empty_sequences(self):
        """Too short sequences are simply removed. This could be tuned."""
        init_size = len(self)
        indices = self.lengths > 11
        self.token_ids = self.token_ids[indices]
        self.lengths = self.lengths[indices]
        new_size = len(self)
        logger.info(f"Remove {init_size - new_size} too short (<=11 tokens) sequences.")

    def remove_unknown_sequences(self):
        """Remove sequences with a (too) high level of unknown tokens."""
        if "unk_token" not in self.params.special_tok_ids:
            return
        else:
            unk_token_id = self.params.special_tok_ids["unk_token"]
        init_size = len(self)
        unk_occs = np.array([np.count_nonzero(a == unk_token_id) for a in self.token_ids])
        indices = (unk_occs / self.lengths) < 0.5
        self.token_ids = self.token_ids[indices]
        self.lengths = self.lengths[indices]
        new_size = len(self)
        logger.info(f"Remove {init_size - new_size} sequences with a high level of unknown tokens (50%).")
    def print_statistics(self):
        """Print some statistics on the corpus. Only the master process."""
        if not self.params.is_master:
            return
        logger.info(f"{len(self)} sequences")
        # data_len = sum(self.lengths)
        # nb_unique_tokens = len(Counter(list(chain(*self.token_ids))))
        # logger.info(f'{data_len} tokens ({nb_unique_tokens} unique)')

        # unk_idx = self.params.special_tok_ids['unk_token']
        # nb_unknown = sum([(t==unk_idx).sum() for t in self.token_ids])
        # logger.info(f'{nb_unknown} unknown tokens (covering {100*nb_unknown/data_len:.2f}% of the data)')

    def batch_sequences(self, batch):
        """Do the padding and transform into torch.tensor."""
        token_ids = [t[0] for t in batch]
        lengths = [t[1] for t in batch]
        assert len(token_ids) == len(lengths)

        # Max for paddings
        max_seq_len_ = max(lengths)

        # Pad token ids
        if self.params.mlm:
            pad_idx = self.params.special_tok_ids["pad_token"]
        else:
            pad_idx = self.params.special_tok_ids["unk_token"]
        tk_ = [list(t.astype(int)) + [pad_idx] * (max_seq_len_ - len(t)) for t in token_ids]
        assert len(tk_) == len(token_ids)
        assert all(len(t) == max_seq_len_ for t in tk_)

        tk_t = torch.tensor(tk_)  # (bs, max_seq_len_)
        lg_t = torch.tensor(lengths)  # (bs)
        return tk_t, lg_t
| 264 | 0 |
def average_absolute_deviation(nums: list[int]) -> float:
    """
    Return the average absolute deviation of a list of numbers.
    >>> average_absolute_deviation([1, 2, 3, 4])
    1.0
    """
    if not nums:  # Makes sure that the list is not empty
        raise ValueError("List is empty")

    average = sum(nums) / len(nums)  # Calculate the average
    return sum(abs(x - average) for x in nums) / len(nums)
if __name__ == "__main__":
import doctest
doctest.testmod()
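# Editor's note: worked example (editor-added). For [1, 2, 3, 4] the mean is 2.5,
# the absolute deviations are [1.5, 0.5, 0.5, 1.5], and their mean is 4 / 4 = 1.0,
# matching the doctest above.
#
#     print(average_absolute_deviation([1, 2, 3, 4]))  # 1.0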
| 95 |
"""simple docstring"""
from sympy import diff, lambdify, symbols
from sympy.functions import * # noqa: F403
def newton_raphson(
    function: str,
    starting_point: complex,
    variable: str = "x",
    precision: float = 10**-10,
    multiplicity: int = 1,
) -> complex:
    """Finds root from the 'starting_point' onwards by Newton-Raphson method."""
    x = symbols(variable)
    func = lambdify(x, function)
    diff_function = lambdify(x, diff(function, x))

    prev_guess = starting_point

    while True:
        if diff_function(prev_guess) != 0:
            next_guess = prev_guess - multiplicity * func(prev_guess) / diff_function(
                prev_guess
            )
        else:
            raise ZeroDivisionError("Could not find root") from None

        # Precision is checked by comparing the difference of consecutive guesses
        if abs(next_guess - prev_guess) < precision:
            return next_guess

        prev_guess = next_guess
# Let's Execute
if __name__ == "__main__":
# Find root of trigonometric function
# Find value of pi
print(f'The root of sin(x) = 0 is {newton_raphson("sin(x)", 2)}')
# Find root of polynomial
# Find fourth Root of 5
print(f'The root of x**4 - 5 = 0 is {newton_raphson("x**4 -5", 0.4 +5j)}')
# Find value of e
print(
'''The root of log(y) - 1 = 0 is ''',
f'{newton_raphson("log(y) - 1", 2, variable="y")}',
)
# Exponential Roots
print(
'''The root of exp(x) - 1 = 0 is''',
f'{newton_raphson("exp(x) - 1", 10, precision=0.005)}',
)
# Find root of cos(x)
print(f'The root of cos(x) = 0 is {newton_raphson("cos(x)", 0)}')
| 264 | 0 |
"""simple docstring"""
from typing import Any
def mode(input_list: list) -> list[Any]:
    """Return a sorted list of the most frequently occurring values."""
    if not input_list:
        return []
    result = [input_list.count(value) for value in input_list]
    y = max(result)  # Gets the maximum count in the input list.
    # Gets values of modes
    return sorted({input_list[i] for i, value in enumerate(result) if value == y})
if __name__ == "__main__":
import doctest
    doctest.testmod()
 | 96 |
"""simple docstring"""
from __future__ import annotations
def shear_stress(stress: float, tangential_force: float, area: float) -> tuple[str, float]:
if (stress, tangential_force, area).count(0 ) != 1:
raise ValueError('''You cannot supply more or less than 2 values''' )
elif stress < 0:
raise ValueError('''Stress cannot be negative''' )
elif tangential_force < 0:
raise ValueError('''Tangential Force cannot be negative''' )
elif area < 0:
raise ValueError('''Area cannot be negative''' )
elif stress == 0:
return (
"stress",
tangential_force / area,
)
elif tangential_force == 0:
return (
"tangential_force",
stress * area,
)
else:
return (
"area",
tangential_force / stress,
)
if __name__ == "__main__":
import doctest
doctest.testmod()
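# Editor's note: worked example (editor-added). With tangential_force = 100 and
# area = 20 (stress passed as 0 to mark it as the unknown), the function returns
# ("stress", 100 / 20) == ("stress", 5.0).
#
#     print(shear_stress(stress=0, tangential_force=100, area=20))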
| 264 | 0 |
'''simple docstring'''
def molarity_to_normality(nfactor: int, moles: float, volume: float) -> float:
    """Convert molarity to normality: normality = molarity * n-factor."""
    return round(float(moles / volume) * nfactor)


def moles_to_pressure(volume: float, moles: float, temperature: float) -> float:
    """Ideal gas law solved for pressure: P = nRT / V, with R = 0.0821 L*atm/(mol*K)."""
    return round(float((moles * 0.0821 * temperature) / (volume)))


def moles_to_volume(pressure: float, moles: float, temperature: float) -> float:
    """Ideal gas law solved for volume: V = nRT / P."""
    return round(float((moles * 0.0821 * temperature) / (pressure)))


def pressure_and_volume_to_temperature(pressure: float, moles: float, volume: float) -> float:
    """Ideal gas law solved for temperature: T = PV / (nR)."""
    return round(float((pressure * volume) / (0.0821 * moles)))
if __name__ == "__main__":
import doctest
    doctest.testmod()
 | 97 |
"""simple docstring"""
from functools import lru_cache
@lru_cache
def factorial(num: int) -> int:
if num < 0:
raise ValueError('''Number should not be negative.''' )
return 1 if num in (0, 1) else num * factorial(num - 1 )
if __name__ == "__main__":
import doctest
doctest.testmod()
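# Editor's note (editor-added): because of @lru_cache, repeated calls reuse the
# memoized intermediate results, so factorial(998) after factorial(1000) is a
# cache hit rather than a fresh recursion.
#
#     factorial(10)           # 3628800
#     factorial.cache_info()  # hits/misses of the lru_cache wrapper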
| 264 | 0 |
"""simple docstring"""
import os
import sys
from contextlib import contextmanager
# Windows only
if os.name == "nt":
import ctypes
import msvcrt # noqa
    class CursorInfo(ctypes.Structure):
        # _fields_ is a specific attribute expected by ctypes
        _fields_ = [("size", ctypes.c_int), ("visible", ctypes.c_byte)]
def hide_cursor():
    if os.name == "nt":
        ci = CursorInfo()
        handle = ctypes.windll.kernel32.GetStdHandle(-11)
        ctypes.windll.kernel32.GetConsoleCursorInfo(handle, ctypes.byref(ci))
        ci.visible = False
        ctypes.windll.kernel32.SetConsoleCursorInfo(handle, ctypes.byref(ci))
    elif os.name == "posix":
        sys.stdout.write('\033[?25l')
        sys.stdout.flush()


def show_cursor():
    if os.name == "nt":
        ci = CursorInfo()
        handle = ctypes.windll.kernel32.GetStdHandle(-11)
        ctypes.windll.kernel32.GetConsoleCursorInfo(handle, ctypes.byref(ci))
        ci.visible = True
        ctypes.windll.kernel32.SetConsoleCursorInfo(handle, ctypes.byref(ci))
    elif os.name == "posix":
        sys.stdout.write('\033[?25h')
        sys.stdout.flush()


@contextmanager
def hidden_cursor():
    try:
        hide_cursor()
        yield
    finally:
        show_cursor()
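# Editor's note: a usage sketch (editor-added) — the context manager guarantees
# the cursor is restored even if the wrapped block raises.
#
#     import time
#     with hidden_cursor():
#         time.sleep(2)  # e.g. render a spinner or progress bar here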
| 98 |
"""simple docstring"""
import sys
N = (
'''73167176531330624919225119674426574742355349194934'''
'''96983520312774506326239578318016984801869478851843'''
'''85861560789112949495459501737958331952853208805511'''
'''12540698747158523863050715693290963295227443043557'''
'''66896648950445244523161731856403098711121722383113'''
'''62229893423380308135336276614282806444486645238749'''
'''30358907296290491560440772390713810515859307960866'''
'''70172427121883998797908792274921901699720888093776'''
'''65727333001053367881220235421809751254540594752243'''
'''52584907711670556013604839586446706324415722155397'''
'''53697817977846174064955149290862569321978468622482'''
'''83972241375657056057490261407972968652414535100474'''
'''82166370484403199890008895243450658541227588666881'''
'''16427171479924442928230863465674813919123162824586'''
'''17866458359124566529476545682848912883142607690042'''
'''24219022671055626321111109370544217506941658960408'''
'''07198403850962455444362981230987879927244284909188'''
'''84580156166097919133875499200524063689912560717606'''
'''05886116467109405077541002256983155200055935729725'''
'''71636269561882670428252483600823257530420752963450'''
)
def str_eval(s: str) -> int:
    """Return the product of the digits in the string s."""
    product = 1
    for digit in s:
        product *= int(digit)
    return product


def solution(n: str = N) -> int:
    """Find the thirteen adjacent digits in n with the greatest product."""
    largest_product = -sys.maxsize - 1
    substr = n[:13]
    cur_index = 13
    while cur_index < len(n) - 13:
        if int(n[cur_index]) >= int(substr[0]):
            substr = substr[1:] + n[cur_index]
            cur_index += 1
        else:
            largest_product = max(largest_product, str_eval(substr))
            substr = n[cur_index : cur_index + 13]
            cur_index += 13
    return largest_product
if __name__ == "__main__":
print(f'{solution() = }')
| 264 | 0 |
import random
class Onepad:
    @staticmethod
    def encrypt(text: str) -> tuple[list[int], list[int]]:
        """Encrypt text using pseudo-random numbers as the key stream."""
        plain = [ord(i) for i in text]
        key = []
        cipher = []
        for i in plain:
            k = random.randint(1, 300)
            c = (i + k) * k
            cipher.append(c)
            key.append(k)
        return cipher, key

    @staticmethod
    def decrypt(cipher: list[int], key: list[int]) -> str:
        """Decrypt the cipher using the matching key list."""
        plain = []
        for i in range(len(key)):
            p = int((cipher[i] - (key[i]) ** 2) / key[i])
            plain.append(chr(p))
        return "".join(plain)
if __name__ == "__main__":
    c, k = Onepad().encrypt("Hello")
print(c, k)
print(Onepad().decrypt(c, k))
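# Editor's note (editor-added): despite the name, this is not a true one-time pad
# (the key comes from a non-cryptographic PRNG), so treat it as a toy cipher.
# Round-tripping always recovers the input:
#
#     c, k = Onepad().encrypt("any text")
#     assert Onepad().decrypt(c, k) == "any text"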
| 99 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
'''configuration_distilbert''': [
'''DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''DistilBertConfig''',
'''DistilBertOnnxConfig''',
],
'''tokenization_distilbert''': ['''DistilBertTokenizer'''],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_distilbert_fast"] = ["DistilBertTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_distilbert"] = [
'''DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''DistilBertForMaskedLM''',
'''DistilBertForMultipleChoice''',
'''DistilBertForQuestionAnswering''',
'''DistilBertForSequenceClassification''',
'''DistilBertForTokenClassification''',
'''DistilBertModel''',
'''DistilBertPreTrainedModel''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_distilbert"] = [
'''TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFDistilBertForMaskedLM''',
'''TFDistilBertForMultipleChoice''',
'''TFDistilBertForQuestionAnswering''',
'''TFDistilBertForSequenceClassification''',
'''TFDistilBertForTokenClassification''',
'''TFDistilBertMainLayer''',
'''TFDistilBertModel''',
'''TFDistilBertPreTrainedModel''',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_distilbert"] = [
'''FlaxDistilBertForMaskedLM''',
'''FlaxDistilBertForMultipleChoice''',
'''FlaxDistilBertForQuestionAnswering''',
'''FlaxDistilBertForSequenceClassification''',
'''FlaxDistilBertForTokenClassification''',
'''FlaxDistilBertModel''',
'''FlaxDistilBertPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_distilbert import (
DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
DistilBertConfig,
DistilBertOnnxConfig,
)
from .tokenization_distilbert import DistilBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_distilbert_fast import DistilBertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_distilbert import (
DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
DistilBertForMaskedLM,
DistilBertForMultipleChoice,
DistilBertForQuestionAnswering,
DistilBertForSequenceClassification,
DistilBertForTokenClassification,
DistilBertModel,
DistilBertPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_distilbert import (
TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFDistilBertForMaskedLM,
TFDistilBertForMultipleChoice,
TFDistilBertForQuestionAnswering,
TFDistilBertForSequenceClassification,
TFDistilBertForTokenClassification,
TFDistilBertMainLayer,
TFDistilBertModel,
TFDistilBertPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_distilbert import (
FlaxDistilBertForMaskedLM,
FlaxDistilBertForMultipleChoice,
FlaxDistilBertForQuestionAnswering,
FlaxDistilBertForSequenceClassification,
FlaxDistilBertForTokenClassification,
FlaxDistilBertModel,
FlaxDistilBertPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 264 | 0 |
"""simple docstring"""
import importlib
import torch
import yaml
from omegaconf import OmegaConf
from taming.models.vqgan import VQModel
def _lowerCAmelCase ( UpperCamelCase_ , UpperCamelCase_=False ):
__SCREAMING_SNAKE_CASE = OmegaConf.load(UpperCamelCase_ )
if display:
print(yaml.dump(OmegaConf.to_container(UpperCamelCase_ ) ) )
return config
def _lowerCAmelCase ( UpperCamelCase_ , UpperCamelCase_=None , UpperCamelCase_=None ):
if conf_path is None:
__SCREAMING_SNAKE_CASE = """./model_checkpoints/vqgan_only.yaml"""
__SCREAMING_SNAKE_CASE = load_config(UpperCamelCase_ , display=UpperCamelCase_ )
__SCREAMING_SNAKE_CASE = VQModel(**config.model.params )
if ckpt_path is None:
__SCREAMING_SNAKE_CASE = """./model_checkpoints/vqgan_only.pt"""
__SCREAMING_SNAKE_CASE = torch.load(UpperCamelCase_ , map_location=UpperCamelCase_ )
if ".ckpt" in ckpt_path:
__SCREAMING_SNAKE_CASE = sd["""state_dict"""]
model.load_state_dict(UpperCamelCase_ , strict=UpperCamelCase_ )
model.to(UpperCamelCase_ )
del sd
return model
def _lowerCAmelCase ( UpperCamelCase_ , UpperCamelCase_ ):
__SCREAMING_SNAKE_CASE ,__SCREAMING_SNAKE_CASE ,__SCREAMING_SNAKE_CASE = model.encode(UpperCamelCase_ )
print(f"VQGAN --- {model.__class__.__name__}: latent shape: {z.shape[2:]}" )
__SCREAMING_SNAKE_CASE = model.decode(UpperCamelCase_ )
return xrec
def _lowerCAmelCase ( UpperCamelCase_ , UpperCamelCase_=False ):
__SCREAMING_SNAKE_CASE ,__SCREAMING_SNAKE_CASE = string.rsplit(""".""" , 1 )
if reload:
__SCREAMING_SNAKE_CASE = importlib.import_module(UpperCamelCase_ )
importlib.reload(UpperCamelCase_ )
return getattr(importlib.import_module(UpperCamelCase_ , package=UpperCamelCase_ ) , cls )
def _lowerCAmelCase ( UpperCamelCase_ ):
if "target" not in config:
raise KeyError("""Expected key `target` to instantiate.""" )
return get_obj_from_str(config["""target"""] )(**config.get("""params""" , {} ) )
def _lowerCAmelCase ( UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_=True , UpperCamelCase_=True ):
__SCREAMING_SNAKE_CASE = instantiate_from_config(UpperCamelCase_ )
if sd is not None:
model.load_state_dict(UpperCamelCase_ )
if gpu:
model.cuda()
if eval_mode:
model.eval()
return {"model": model}
def _lowerCAmelCase ( UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ):
# load the specified checkpoint
if ckpt:
__SCREAMING_SNAKE_CASE = torch.load(UpperCamelCase_ , map_location="""cpu""" )
__SCREAMING_SNAKE_CASE = pl_sd["""global_step"""]
print(f"loaded model from global step {global_step}." )
else:
__SCREAMING_SNAKE_CASE = {"""state_dict""": None}
__SCREAMING_SNAKE_CASE = None
__SCREAMING_SNAKE_CASE = load_model_from_config(config.model , pl_sd["""state_dict"""] , gpu=UpperCamelCase_ , eval_mode=UpperCamelCase_ )["""model"""]
return model, global_step
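# A hypothetical driver for the helpers above. Their placeholder names collide,
# so this assumes the conventional names load_config / load_vqgan /
# reconstruct_with_vqgan, and that the default checkpoint files exist locally.
import torch

_device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
_vqgan = load_vqgan(_device)  # falls back to ./model_checkpoints/vqgan_only.{yaml,pt}
_dummy = torch.randn(1, 3, 256, 256, device=_device)  # stand-in for a preprocessed image batch
_xrec = reconstruct_with_vqgan(_dummy, _vqgan)
print(_xrec.shape)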
| 100 |
"""simple docstring"""
import argparse
import json
from pathlib import Path
import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from timm.data import resolve_data_config
from timm.data.transforms_factory import create_transform
from transformers import (
BitConfig,
ViTHybridConfig,
ViTHybridForImageClassification,
ViTHybridImageProcessor,
ViTHybridModel,
)
from transformers.image_utils import PILImageResampling
from transformers.utils import logging
logging.set_verbosity_info()
lowercase__ : Dict = logging.get_logger(__name__)
def __lowercase ( _a , _a=False ):
snake_case_ : List[str] = []
# fmt: off
# stem:
rename_keys.append(('''cls_token''', '''vit.embeddings.cls_token''') )
rename_keys.append(('''pos_embed''', '''vit.embeddings.position_embeddings''') )
rename_keys.append(('''patch_embed.proj.weight''', '''vit.embeddings.patch_embeddings.projection.weight''') )
rename_keys.append(('''patch_embed.proj.bias''', '''vit.embeddings.patch_embeddings.projection.bias''') )
# backbone
rename_keys.append(('''patch_embed.backbone.stem.conv.weight''', '''vit.embeddings.patch_embeddings.backbone.bit.embedder.convolution.weight''') )
rename_keys.append(('''patch_embed.backbone.stem.norm.weight''', '''vit.embeddings.patch_embeddings.backbone.bit.embedder.norm.weight''') )
rename_keys.append(('''patch_embed.backbone.stem.norm.bias''', '''vit.embeddings.patch_embeddings.backbone.bit.embedder.norm.bias''') )
for stage_idx in range(len(config.backbone_config.depths ) ):
for layer_idx in range(config.backbone_config.depths[stage_idx] ):
rename_keys.append((f"patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.conv1.weight", f"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.conv1.weight") )
rename_keys.append((f"patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm1.weight", f"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm1.weight") )
rename_keys.append((f"patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm1.bias", f"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm1.bias") )
rename_keys.append((f"patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.conv2.weight", f"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.conv2.weight") )
rename_keys.append((f"patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm2.weight", f"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm2.weight") )
rename_keys.append((f"patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm2.bias", f"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm2.bias") )
rename_keys.append((f"patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.conv3.weight", f"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.conv3.weight") )
rename_keys.append((f"patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm3.weight", f"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm3.weight") )
rename_keys.append((f"patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm3.bias", f"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm3.bias") )
rename_keys.append((f"patch_embed.backbone.stages.{stage_idx}.blocks.0.downsample.conv.weight", f"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.0.downsample.conv.weight") )
rename_keys.append((f"patch_embed.backbone.stages.{stage_idx}.blocks.0.downsample.norm.weight", f"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.0.downsample.norm.weight") )
rename_keys.append((f"patch_embed.backbone.stages.{stage_idx}.blocks.0.downsample.norm.bias", f"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.0.downsample.norm.bias") )
# transformer encoder
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((f"blocks.{i}.norm1.weight", f"vit.encoder.layer.{i}.layernorm_before.weight") )
rename_keys.append((f"blocks.{i}.norm1.bias", f"vit.encoder.layer.{i}.layernorm_before.bias") )
rename_keys.append((f"blocks.{i}.attn.proj.weight", f"vit.encoder.layer.{i}.attention.output.dense.weight") )
rename_keys.append((f"blocks.{i}.attn.proj.bias", f"vit.encoder.layer.{i}.attention.output.dense.bias") )
rename_keys.append((f"blocks.{i}.norm2.weight", f"vit.encoder.layer.{i}.layernorm_after.weight") )
rename_keys.append((f"blocks.{i}.norm2.bias", f"vit.encoder.layer.{i}.layernorm_after.bias") )
rename_keys.append((f"blocks.{i}.mlp.fc1.weight", f"vit.encoder.layer.{i}.intermediate.dense.weight") )
rename_keys.append((f"blocks.{i}.mlp.fc1.bias", f"vit.encoder.layer.{i}.intermediate.dense.bias") )
rename_keys.append((f"blocks.{i}.mlp.fc2.weight", f"vit.encoder.layer.{i}.output.dense.weight") )
rename_keys.append((f"blocks.{i}.mlp.fc2.bias", f"vit.encoder.layer.{i}.output.dense.bias") )
if base_model:
# layernorm + pooler
rename_keys.extend(
[
('''norm.weight''', '''layernorm.weight'''),
('''norm.bias''', '''layernorm.bias'''),
('''pre_logits.fc.weight''', '''pooler.dense.weight'''),
('''pre_logits.fc.bias''', '''pooler.dense.bias'''),
] )
# if just the base model, we should remove "vit" from all keys that start with "vit"
snake_case_ : Optional[int] = [(pair[0], pair[1][4:]) if pair[1].startswith('''vit''' ) else pair for pair in rename_keys]
else:
# layernorm + classification head
rename_keys.extend(
[
('''norm.weight''', '''vit.layernorm.weight'''),
('''norm.bias''', '''vit.layernorm.bias'''),
('''head.weight''', '''classifier.weight'''),
('''head.bias''', '''classifier.bias'''),
] )
# fmt: on
return rename_keys
def __lowercase ( _a , _a , _a=False ):
for i in range(config.num_hidden_layers ):
if base_model:
snake_case_ : List[str] = ''''''
else:
snake_case_ : Dict = '''vit.'''
# read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
snake_case_ : List[str] = state_dict.pop(f"blocks.{i}.attn.qkv.weight" )
snake_case_ : Optional[int] = state_dict.pop(f"blocks.{i}.attn.qkv.bias" )
# next, add query, keys and values (in that order) to the state dict
snake_case_ : Any = in_proj_weight[
: config.hidden_size, :
]
snake_case_ : Dict = in_proj_bias[: config.hidden_size]
snake_case_ : str = in_proj_weight[
config.hidden_size : config.hidden_size * 2, :
]
snake_case_ : Optional[int] = in_proj_bias[
config.hidden_size : config.hidden_size * 2
]
snake_case_ : Dict = in_proj_weight[
-config.hidden_size :, :
]
snake_case_ : str = in_proj_bias[-config.hidden_size :]
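# Sanity check of the slicing above: a fused qkv matrix of shape
# (3 * hidden_size, hidden_size) splits into three equal query/key/value blocks.
_hidden = 4
_qkv_w = torch.arange(3 * _hidden * _hidden, dtype=torch.float32).reshape(3 * _hidden, _hidden)
_q_w = _qkv_w[:_hidden, :]               # first block -> query
_k_w = _qkv_w[_hidden : _hidden * 2, :]  # middle block -> key
_v_w = _qkv_w[-_hidden:, :]              # last block -> value
assert torch.equal(torch.cat([_q_w, _k_w, _v_w], dim=0), _qkv_w)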
def __lowercase ( _a ):
snake_case_ : Dict = ['''head.weight''', '''head.bias''']
for k in ignore_keys:
state_dict.pop(_a , _a )
def __lowercase ( _a , _a , _a ):
snake_case_ : Union[str, Any] = dct.pop(_a )
snake_case_ : Union[str, Any] = val
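# The pop-and-reassign helper above is the whole renaming mechanism; in
# miniature, with a toy state dict and a hypothetical key pair:
_toy_state = {"blocks.0.norm1.weight": 1.0}
_toy_state["vit.encoder.layer.0.layernorm_before.weight"] = _toy_state.pop("blocks.0.norm1.weight")
assert list(_toy_state) == ["vit.encoder.layer.0.layernorm_before.weight"]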
def __lowercase ( ):
snake_case_ : Any = '''http://images.cocodataset.org/val2017/000000039769.jpg'''
snake_case_ : Tuple = Image.open(requests.get(_a , stream=_a ).raw )
return im
@torch.no_grad()
def __lowercase ( _a , _a , _a=False ):
snake_case_ : str = BitConfig(
global_padding='''same''' , layer_type='''bottleneck''' , depths=(3, 4, 9) , out_features=['''stage3'''] , embedding_dynamic_padding=_a , )
snake_case_ : Tuple = ViTHybridConfig(backbone_config=_a , image_size=384 , num_labels=1_000 )
snake_case_ : int = False
# load original model from timm
snake_case_ : str = timm.create_model(_a , pretrained=_a )
timm_model.eval()
# load state_dict of original model, remove and rename some keys
snake_case_ : Any = timm_model.state_dict()
if base_model:
remove_classification_head_(_a )
snake_case_ : int = create_rename_keys(_a , _a )
for src, dest in rename_keys:
rename_key(_a , _a , _a )
read_in_q_k_v(_a , _a , _a )
snake_case_ : Optional[Any] = '''huggingface/label-files'''
snake_case_ : Any = '''imagenet-1k-id2label.json'''
snake_case_ : Dict = json.load(open(hf_hub_download(_a , _a , repo_type='''dataset''' ) , '''r''' ) )
snake_case_ : Dict = {int(_a ): v for k, v in idalabel.items()}
snake_case_ : Optional[int] = idalabel
snake_case_ : Optional[Any] = {v: k for k, v in idalabel.items()}
# load HuggingFace model
if vit_name[-5:] == "in21k":
snake_case_ : Optional[Any] = ViTHybridModel(_a ).eval()
else:
snake_case_ : Any = ViTHybridForImageClassification(_a ).eval()
model.load_state_dict(_a )
# create image processor
snake_case_ : Optional[Any] = create_transform(**resolve_data_config({} , model=_a ) )
snake_case_ : List[Any] = transform.transforms
snake_case_ : Optional[Any] = {
'''bilinear''': PILImageResampling.BILINEAR,
'''bicubic''': PILImageResampling.BICUBIC,
'''nearest''': PILImageResampling.NEAREST,
}
snake_case_ : List[Any] = ViTHybridImageProcessor(
do_resize=_a , size={'''shortest_edge''': timm_transforms[0].size} , resample=pillow_resamplings[timm_transforms[0].interpolation.value] , do_center_crop=_a , crop_size={'''height''': timm_transforms[1].size[0], '''width''': timm_transforms[1].size[1]} , do_normalize=_a , image_mean=timm_transforms[-1].mean.tolist() , image_std=timm_transforms[-1].std.tolist() , )
snake_case_ : Optional[int] = prepare_img()
snake_case_ : Optional[int] = transform(_a ).unsqueeze(0 )
snake_case_ : int = processor(_a , return_tensors='''pt''' ).pixel_values
# verify pixel values
assert torch.allclose(_a , _a )
# verify logits
with torch.no_grad():
snake_case_ : List[str] = model(_a )
snake_case_ : Any = outputs.logits
print('''Predicted class:''' , logits.argmax(-1 ).item() )
if base_model:
snake_case_ : Optional[Any] = timm_model.forward_features(_a )
assert timm_pooled_output.shape == outputs.pooler_output.shape
assert torch.allclose(_a , outputs.pooler_output , atol=1E-3 )
else:
snake_case_ : int = timm_model(_a )
assert timm_logits.shape == outputs.logits.shape
assert torch.allclose(_a , outputs.logits , atol=1E-3 )
print('''Looks ok!''' )
if pytorch_dump_folder_path is not None:
Path(_a ).mkdir(exist_ok=_a )
print(f"Saving model {vit_name} to {pytorch_dump_folder_path}" )
model.save_pretrained(_a )
print(f"Saving processor to {pytorch_dump_folder_path}" )
processor.save_pretrained(_a )
if push_to_hub:
print(f"Pushing model and processor to the hub {vit_name}" )
model.push_to_hub(f"ybelkada/{vit_name}" )
processor.push_to_hub(f"ybelkada/{vit_name}" )
if __name__ == "__main__":
lowercase__ : int = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--vit_name''',
default='''vit_base_r50_s16_384''',
type=str,
help='''Name of the hybrid ViT timm model you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.'''
)
parser.add_argument(
'''--push_to_hub''', action='''store_true''', help='''Whether to upload the model to the HuggingFace hub.'''
)
lowercase__ : Any = parser.parse_args()
convert_vit_checkpoint(args.vit_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 264 | 0 |
import argparse
import pathlib
import fairseq
import torch
from fairseq.models.roberta import RobertaModel as FairseqRobertaModel
from fairseq.modules import TransformerSentenceEncoderLayer
from packaging import version
from transformers import XLMRobertaConfig, XLMRobertaXLForMaskedLM, XLMRobertaXLForSequenceClassification
from transformers.models.bert.modeling_bert import (
BertIntermediate,
BertLayer,
BertOutput,
BertSelfAttention,
BertSelfOutput,
)
from transformers.models.roberta.modeling_roberta import RobertaAttention
from transformers.utils import logging
if version.parse(fairseq.__version__) < version.parse("1.0.0a"):
raise Exception("requires fairseq >= 1.0.0a")
logging.set_verbosity_info()
lowercase__ :int = logging.get_logger(__name__)
lowercase__ :Union[str, Any] = "Hello world! cécé herlolip"
def UpperCamelCase ( lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ):
'''simple docstring'''
lowercase = FairseqRobertaModel.from_pretrained(lowerCAmelCase__ )
roberta.eval() # disable dropout
lowercase = roberta.model.encoder.sentence_encoder
lowercase = XLMRobertaConfig(
vocab_size=roberta_sent_encoder.embed_tokens.num_embeddings , hidden_size=roberta.cfg.model.encoder_embed_dim , num_hidden_layers=roberta.cfg.model.encoder_layers , num_attention_heads=roberta.cfg.model.encoder_attention_heads , intermediate_size=roberta.cfg.model.encoder_ffn_embed_dim , max_position_embeddings=514 , type_vocab_size=1 , layer_norm_eps=1E-5 , )
if classification_head:
lowercase = roberta.model.classification_heads['''mnli'''].out_proj.weight.shape[0]
print('''Our RoBERTa config:''' , lowerCAmelCase__ )
lowercase = XLMRobertaXLForSequenceClassification(lowerCAmelCase__ ) if classification_head else XLMRobertaXLForMaskedLM(lowerCAmelCase__ )
model.eval()
# Now let's copy all the weights.
# Embeddings
lowercase = roberta_sent_encoder.embed_tokens.weight
lowercase = roberta_sent_encoder.embed_positions.weight
lowercase = torch.zeros_like(
model.roberta.embeddings.token_type_embeddings.weight ) # just zero them out b/c RoBERTa doesn't use them.
lowercase = roberta_sent_encoder.layer_norm.weight
lowercase = roberta_sent_encoder.layer_norm.bias
for i in range(config.num_hidden_layers ):
# Encoder: start of layer
lowercase = model.roberta.encoder.layer[i]
lowercase = roberta_sent_encoder.layers[i]
lowercase = layer.attention
lowercase = roberta_layer.self_attn_layer_norm.weight
lowercase = roberta_layer.self_attn_layer_norm.bias
# self attention
lowercase = layer.attention.self
assert (
roberta_layer.self_attn.k_proj.weight.data.shape
== roberta_layer.self_attn.q_proj.weight.data.shape
== roberta_layer.self_attn.v_proj.weight.data.shape
== torch.Size((config.hidden_size, config.hidden_size) )
)
lowercase = roberta_layer.self_attn.q_proj.weight
lowercase = roberta_layer.self_attn.q_proj.bias
lowercase = roberta_layer.self_attn.k_proj.weight
lowercase = roberta_layer.self_attn.k_proj.bias
lowercase = roberta_layer.self_attn.v_proj.weight
lowercase = roberta_layer.self_attn.v_proj.bias
# self-attention output
lowercase = layer.attention.output
assert self_output.dense.weight.shape == roberta_layer.self_attn.out_proj.weight.shape
lowercase = roberta_layer.self_attn.out_proj.weight
lowercase = roberta_layer.self_attn.out_proj.bias
# this one is final layer norm
lowercase = roberta_layer.final_layer_norm.weight
lowercase = roberta_layer.final_layer_norm.bias
# intermediate
lowercase = layer.intermediate
assert intermediate.dense.weight.shape == roberta_layer.fca.weight.shape
lowercase = roberta_layer.fca.weight
lowercase = roberta_layer.fca.bias
# output
lowercase = layer.output
assert bert_output.dense.weight.shape == roberta_layer.fca.weight.shape
lowercase = roberta_layer.fca.weight
lowercase = roberta_layer.fca.bias
# end of layer
if classification_head:
lowercase = roberta.model.classification_heads['''mnli'''].dense.weight
lowercase = roberta.model.classification_heads['''mnli'''].dense.bias
lowercase = roberta.model.classification_heads['''mnli'''].out_proj.weight
lowercase = roberta.model.classification_heads['''mnli'''].out_proj.bias
else:
# LM Head
lowercase = roberta.model.encoder.lm_head.dense.weight
lowercase = roberta.model.encoder.lm_head.dense.bias
lowercase = roberta.model.encoder.lm_head.layer_norm.weight
lowercase = roberta.model.encoder.lm_head.layer_norm.bias
lowercase = roberta.model.encoder.lm_head.weight
lowercase = roberta.model.encoder.lm_head.bias
# Let's check that we get the same results.
lowercase = roberta.encode(lowerCAmelCase__ ).unsqueeze(0 ) # batch of size 1
lowercase = model(lowerCAmelCase__ )[0]
if classification_head:
lowercase = roberta.model.classification_heads['''mnli'''](roberta.extract_features(lowerCAmelCase__ ) )
else:
lowercase = roberta.model(lowerCAmelCase__ )[0]
print(our_output.shape , their_output.shape )
lowercase = torch.max(torch.abs(our_output - their_output ) ).item()
print(f'max_absolute_diff = {max_absolute_diff}' ) # ~ 1e-7
lowercase = torch.allclose(lowerCAmelCase__ , lowerCAmelCase__ , atol=1E-3 )
print('''Do both models output the same tensors?''' , '''🔥''' if success else '''💩''' )
if not success:
raise Exception('''Something went wRoNg''' )
pathlib.Path(lowerCAmelCase__ ).mkdir(parents=lowerCAmelCase__ , exist_ok=lowerCAmelCase__ )
print(f'Saving model to {pytorch_dump_folder_path}' )
model.save_pretrained(lowerCAmelCase__ )
if __name__ == "__main__":
lowercase__ :Dict = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--roberta_checkpoint_path", default=None, type=str, required=True, help="Path the official PyTorch dump."
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
parser.add_argument(
"--classification_head", action="store_true", help="Whether to convert a final classification head."
)
lowercase__ :Optional[int] = parser.parse_args()
convert_xlm_roberta_xl_checkpoint_to_pytorch(
args.roberta_checkpoint_path, args.pytorch_dump_folder_path, args.classification_head
)
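    # The essence of the parity check performed above, on toy tensors: compare
    # the max absolute element-wise difference against a tolerance.
    _ours = torch.randn(1, 8, 16)
    _theirs = _ours + 1e-5 * torch.randn_like(_ours)
    print(f"max_absolute_diff = {torch.max(torch.abs(_ours - _theirs)).item()}")  # ~1e-5
    assert torch.allclose(_ours, _theirs, atol=1e-3)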
| 101 |
"""simple docstring"""
import argparse
import json
import os
import re
import torch
from transformers import BloomConfig, BloomModel
from transformers.file_utils import CONFIG_NAME, WEIGHTS_NAME
from transformers.utils import logging
logging.set_verbosity_info()
lowercase__ : Dict = [
'''word_embeddings_layernorm.weight''',
'''word_embeddings_layernorm.bias''',
'''input_layernorm.weight''',
'''input_layernorm.bias''',
'''post_attention_layernorm.weight''',
'''post_attention_layernorm.bias''',
'''self_attention.dense.bias''',
'''mlp.dense_4h_to_h.bias''',
'''ln_f.weight''',
'''ln_f.bias''',
]
lowercase__ : str = [
'''mlp.dense_4h_to_h.weight''',
'''self_attention.dense.weight''',
]
def __lowercase ( _a , _a ):
snake_case_ : Optional[int] = {
'''word_embeddings.weight''': '''word_embeddings.weight''',
'''word_embeddings.norm.weight''': '''word_embeddings_layernorm.weight''',
'''word_embeddings.norm.bias''': '''word_embeddings_layernorm.bias''',
'''weight''': '''ln_f.weight''',
'''bias''': '''ln_f.bias''',
}
if key in layer_rename_map:
return layer_rename_map[key]
# Handle transformer blocks
snake_case_ : List[Any] = int(re.match(r'''.*layer_(\d*).*''' , _a )[1] )
layer_number -= 3
return f"h.{layer_number}." + key
def __lowercase ( _a ):
if dtype == torch.bool:
return 1 / 8
snake_case_ : Dict = re.search(r'''[^\d](\d+)$''' , str(_a ) )
if bit_search is None:
raise ValueError(f"`dtype` is not a valid dtype: {dtype}." )
snake_case_ : Optional[int] = int(bit_search.groups()[0] )
return bit_size // 8
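# Quick check of the byte-size helper above (reproduced under a hypothetical
# name, since the placeholder hides the original one): the bit width is parsed
# from the dtype's string form, e.g. torch.float16 -> 16 bits -> 2 bytes.
def _dtype_nbytes(dtype):
    if dtype == torch.bool:
        return 1 / 8
    return int(re.search(r"[^\d](\d+)$", str(dtype)).groups()[0]) // 8

assert _dtype_nbytes(torch.float16) == 2
assert _dtype_nbytes(torch.float32) == 4
assert _dtype_nbytes(torch.int64) == 8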
def __lowercase ( _a , _a , _a , _a , _a ):
# Construct model
if bloom_config_file == "":
snake_case_ : int = BloomConfig()
else:
snake_case_ : List[str] = BloomConfig.from_json_file(_a )
if shard_model:
snake_case_ : List[str] = os.listdir(_a )
snake_case_ : int = sorted(filter(lambda _a : s.startswith('''layer''' ) and "model_00" in s , _a ) )
snake_case_ : List[str] = {'''weight_map''': {}, '''metadata''': {}}
snake_case_ : Any = 0
snake_case_ : Union[str, Any] = None
snake_case_ : List[str] = BloomConfig()
for j, file in enumerate(_a ):
print('''Processing file: {}'''.format(_a ) )
snake_case_ : Dict = None
for i in range(_a ):
# load all TP files
snake_case_ : Union[str, Any] = file.replace('''model_00''' , f"model_0{i}" )
snake_case_ : List[str] = torch.load(os.path.join(_a , _a ) , map_location='''cpu''' )
# Rename keys in the transformers names
snake_case_ : str = list(temp.keys() )
for key in keys:
snake_case_ : Any = temp.pop(_a )
if tensors is None:
snake_case_ : Any = temp
else:
for key in tensors.keys():
if any(key.endswith(_a ) for end in WEIGHTS_TO_AVERAGE_ENDSWITH ):
                            # We average (sum and then divide) some weights across TP ranks (see https://github.com/bigscience-workshop/Megatron-DeepSpeed/blob/olruwase/sync_layer_norms/megatron/training.py#L425)
tensors[key] += temp[key]
else:
# Some weights are RowParallelLinear in Megatron-Deepspeed, others are ColumnParallel
snake_case_ : Tuple = 1 if any(text in key for text in WEIGHTS_WITH_ROW_PARALLELISM_CONTAIN ) else 0
                            # We concatenate these weights across TP ranks
snake_case_ : List[str] = torch.cat([tensors[key], temp[key]] , dim=_a )
# Divide by the number of TP the weights we want to average
for key in tensors.keys():
if any(key.endswith(_a ) for end in WEIGHTS_TO_AVERAGE_ENDSWITH ):
snake_case_ : Any = tensors[key] / pretraining_tp
torch.save(
_a , os.path.join(
_a , '''pytorch_model_{}-of-{}.bin'''.format(str(j + 1 ).zfill(5 ) , str(len(_a ) ).zfill(5 ) ) , ) , )
for key in tensors.keys():
snake_case_ : List[str] = tensors[key]
total_size += value.numel() * get_dtype_size(value.dtype )
if key not in index_dict["weight_map"]:
snake_case_ : List[str] = '''pytorch_model_{}-of-{}.bin'''.format(
str(j + 1 ).zfill(5 ) , str(len(_a ) ).zfill(5 ) )
snake_case_ : int = BloomConfig()
snake_case_ : Any = pytorch_dump_folder_path + '''/''' + CONFIG_NAME
snake_case_ : Dict = total_size
with open(_a , '''w''' , encoding='''utf-8''' ) as f:
f.write(config.to_json_string() )
with open(os.path.join(_a , WEIGHTS_NAME + '''.index.json''' ) , '''w''' , encoding='''utf-8''' ) as f:
snake_case_ : Tuple = json.dumps(_a , indent=2 , sort_keys=_a ) + '''\n'''
f.write(_a )
else:
snake_case_ : Union[str, Any] = BloomModel(_a )
snake_case_ : List[str] = os.listdir(_a )
snake_case_ : Dict = sorted(filter(lambda _a : s.startswith('''layer''' ) and "model_00" in s , _a ) )
snake_case_ : List[Any] = None
for i, file in enumerate(_a ):
snake_case_ : Optional[Any] = None
for i in range(_a ):
# load all TP files
snake_case_ : List[str] = file.replace('''model_00''' , f"model_0{i}" )
snake_case_ : Optional[Any] = torch.load(os.path.join(_a , _a ) , map_location='''cpu''' )
# Rename keys in the transformers names
snake_case_ : str = list(temp.keys() )
for key in keys:
snake_case_ : str = temp.pop(_a )
if tensors is None:
snake_case_ : int = temp
else:
for key in tensors.keys():
                        # We average (sum and then divide) some weights across TP ranks (see https://github.com/bigscience-workshop/Megatron-DeepSpeed/blob/olruwase/sync_layer_norms/megatron/training.py#L425)
if any(key.endswith(_a ) for end in WEIGHTS_TO_AVERAGE_ENDSWITH ):
tensors[key] += temp[key]
else:
# Some weights are RowParallelLinear in Megatron-Deepspeed, others are ColumnParallel
snake_case_ : Tuple = 1 if any(text in key for text in WEIGHTS_WITH_ROW_PARALLELISM_CONTAIN ) else 0
                            # We concatenate these weights across TP ranks
snake_case_ : Optional[Any] = torch.cat([tensors[key], temp[key]] , dim=_a )
# Divide by the number of TP the weights we want to average
for key in tensors.keys():
if any(key.endswith(_a ) for end in WEIGHTS_TO_AVERAGE_ENDSWITH ):
snake_case_ : Union[str, Any] = tensors[key] / pretraining_tp
snake_case_ : Any = model.load_state_dict(_a , strict=_a )
assert not other_keys.unexpected_keys, f"The keys {other_keys.unexpected_keys} are unexpected"
if missing_keys is None:
snake_case_ : Optional[int] = set(other_keys.missing_keys )
else:
snake_case_ : Tuple = missing_keys.intersection(set(other_keys.missing_keys ) )
assert not missing_keys, f"The keys {missing_keys} are missing"
# Save pytorch-model
os.makedirs(_a , exist_ok=_a )
snake_case_ : List[str] = pytorch_dump_folder_path + '''/''' + WEIGHTS_NAME
snake_case_ : Optional[Any] = pytorch_dump_folder_path + '''/''' + CONFIG_NAME
print(f"Save PyTorch model to {pytorch_weights_dump_path} with dtype {config.torch_dtype}" )
if config.torch_dtype is not None:
snake_case_ : Optional[Any] = model.to(config.torch_dtype )
torch.save(model.state_dict() , _a )
print(f"Save configuration file to {pytorch_config_dump_path}" )
with open(_a , '''w''' , encoding='''utf-8''' ) as f:
f.write(config.to_json_string() )
if __name__ == "__main__":
lowercase__ : str = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--bloom_checkpoint_path''',
default=None,
type=str,
required=True,
help='''Path to the Megatron-LM checkpoint path.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
parser.add_argument(
'''--bloom_config_file''',
default='''''',
type=str,
help=(
'''An optional config json file corresponding to the pre-trained model. \n'''
'''This specifies the model architecture.'''
),
)
parser.add_argument(
'''--shard_model''',
action='''store_true''',
        help='''An optional setting to shard the output model. \nThis enables sharding the converted checkpoint.''',
)
parser.add_argument(
'''--pretraining_tp''',
default=4,
type=int,
help='''Pretraining TP rank that has been used when training the model in Megatron-LM \n''',
)
lowercase__ : List[Any] = parser.parse_args()
convert_bloom_checkpoint_to_pytorch(
args.bloom_checkpoint_path,
args.bloom_config_file,
args.pytorch_dump_folder_path,
args.shard_model,
args.pretraining_tp,
)
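    # In miniature, the tensor-parallel merge rules used above: replicated
    # weights (the WEIGHTS_TO_AVERAGE_ENDSWITH names) are averaged across ranks,
    # while sharded linear weights are concatenated along dim 0 (column-parallel)
    # or dim 1 (row-parallel, the WEIGHTS_WITH_ROW_PARALLELISM_CONTAIN names).
    _shards = [torch.ones(2, 4), 3.0 * torch.ones(2, 4)]  # one tensor per TP rank
    print((sum(_shards) / len(_shards)).shape)  # averaged -> torch.Size([2, 4])
    print(torch.cat(_shards, dim=0).shape)      # column-parallel -> torch.Size([4, 4])
    print(torch.cat(_shards, dim=1).shape)      # row-parallel -> torch.Size([2, 8])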
| 264 | 0 |
"""simple docstring"""
import logging
import math
import os
from dataclasses import dataclass, field
from glob import glob
from typing import Optional
from torch.utils.data import ConcatDataset
import transformers
from transformers import (
CONFIG_MAPPING,
MODEL_WITH_LM_HEAD_MAPPING,
AutoConfig,
AutoModelWithLMHead,
AutoTokenizer,
DataCollatorForLanguageModeling,
DataCollatorForPermutationLanguageModeling,
DataCollatorForWholeWordMask,
HfArgumentParser,
LineByLineTextDataset,
LineByLineWithRefDataset,
PreTrainedTokenizer,
TextDataset,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import is_main_process
SCREAMING_SNAKE_CASE : int = logging.getLogger(__name__)
SCREAMING_SNAKE_CASE : Dict = list(MODEL_WITH_LM_HEAD_MAPPING.keys())
SCREAMING_SNAKE_CASE : Any = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)
@dataclass
class _UpperCAmelCase :
'''simple docstring'''
lowerCamelCase__ =field(
default=__snake_case, metadata={
'help': (
'The model checkpoint for weights initialization. Leave None if you want to train a model from'
' scratch.'
)
}, )
lowerCamelCase__ =field(
default=__snake_case, metadata={'help': 'If training from scratch, pass a model type from the list: ' + ', '.join(__snake_case )}, )
lowerCamelCase__ =field(
default=__snake_case, metadata={'help': 'Pretrained config name or path if not the same as model_name'} )
lowerCamelCase__ =field(
default=__snake_case, metadata={'help': 'Pretrained tokenizer name or path if not the same as model_name'} )
lowerCamelCase__ =field(
default=__snake_case, metadata={'help': 'Where do you want to store the pretrained models downloaded from huggingface.co'}, )
@dataclass
class _UpperCAmelCase :
'''simple docstring'''
lowerCamelCase__ =field(
default=__snake_case, metadata={'help': 'The input training data file (a text file).'} )
lowerCamelCase__ =field(
default=__snake_case, metadata={
'help': (
'The input training data files (multiple files in glob format). '
                'Very often splitting large files into smaller files can prevent the tokenizer from going out of memory'
)
}, )
lowerCamelCase__ =field(
default=__snake_case, metadata={'help': 'An optional input evaluation data file to evaluate the perplexity on (a text file).'}, )
lowerCamelCase__ =field(
default=__snake_case, metadata={'help': 'An optional input train ref data file for whole word mask in Chinese.'}, )
lowerCamelCase__ =field(
default=__snake_case, metadata={'help': 'An optional input eval ref data file for whole word mask in Chinese.'}, )
lowerCamelCase__ =field(
default=__snake_case, metadata={'help': 'Whether distinct lines of text in the dataset are to be handled as distinct sequences.'}, )
lowerCamelCase__ =field(
default=__snake_case, metadata={'help': 'Train with masked-language modeling loss instead of language modeling.'} )
    lowerCamelCase__ =field(default=__snake_case, metadata={'help': 'Whether or not to use whole word masking.'} )
lowerCamelCase__ =field(
default=0.1_5, metadata={'help': 'Ratio of tokens to mask for masked language modeling loss'} )
lowerCamelCase__ =field(
default=1 / 6, metadata={
'help': (
'Ratio of length of a span of masked tokens to surrounding context length for permutation language'
' modeling.'
)
}, )
lowerCamelCase__ =field(
default=5, metadata={'help': 'Maximum length of a span of masked tokens for permutation language modeling.'} )
lowerCamelCase__ =field(
default=-1, metadata={
'help': (
                'Optional input sequence length after tokenization. '
                'The training dataset will be truncated in blocks of this size for training. '
                'Default to the model max input length for single sentence inputs (take into account special tokens).'
)
}, )
lowerCamelCase__ =field(
default=__snake_case, metadata={'help': 'Overwrite the cached training and evaluation sets'} )
def lowercase ( _snake_case : DataTrainingArguments , _snake_case : PreTrainedTokenizer , _snake_case : bool = False , _snake_case : Optional[str] = None , ) ->Any:
"""simple docstring"""
def _dataset(_snake_case : List[Any] , _snake_case : str=None ):
if args.line_by_line:
if ref_path is not None:
if not args.whole_word_mask or not args.mlm:
                    raise ValueError('''You need to set whole word masking and mlm to True for Chinese Whole Word Mask''' )
return LineByLineWithRefDataset(
tokenizer=_snake_case , file_path=_snake_case , block_size=args.block_size , ref_path=_snake_case , )
return LineByLineTextDataset(tokenizer=_snake_case , file_path=_snake_case , block_size=args.block_size )
else:
return TextDataset(
tokenizer=_snake_case , file_path=_snake_case , block_size=args.block_size , overwrite_cache=args.overwrite_cache , cache_dir=_snake_case , )
if evaluate:
return _dataset(args.eval_data_file , args.eval_ref_file )
elif args.train_data_files:
return ConcatDataset([_dataset(_snake_case ) for f in glob(args.train_data_files )] )
else:
return _dataset(args.train_data_file , args.train_ref_file )
def lowercase ( ) ->List[Any]:
"""simple docstring"""
__snake_case : List[Any] = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
__snake_case , __snake_case , __snake_case : Union[str, Any] = parser.parse_args_into_dataclasses()
if data_args.eval_data_file is None and training_args.do_eval:
raise ValueError(
'''Cannot do evaluation without an evaluation data file. Either supply a file to --eval_data_file '''
'''or remove the --do_eval argument.''' )
if (
os.path.exists(training_args.output_dir )
and os.listdir(training_args.output_dir )
and training_args.do_train
and not training_args.overwrite_output_dir
):
raise ValueError(
f"""Output directory ({training_args.output_dir}) already exists and is not empty. Use"""
''' --overwrite_output_dir to overcome.''' )
# Setup logging
logging.basicConfig(
format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''' , datefmt='''%m/%d/%Y %H:%M:%S''' , level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN , )
logger.warning(
'''Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s''' , training_args.local_rank , training_args.device , training_args.n_gpu , bool(training_args.local_rank != -1 ) , training_args.fpaa , )
# Set the verbosity to info of the Transformers logger (on main process only):
if is_main_process(training_args.local_rank ):
transformers.utils.logging.set_verbosity_info()
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
logger.info('''Training/evaluation parameters %s''' , _snake_case )
# Set seed
set_seed(training_args.seed )
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
if model_args.config_name:
__snake_case : Optional[Any] = AutoConfig.from_pretrained(model_args.config_name , cache_dir=model_args.cache_dir )
elif model_args.model_name_or_path:
__snake_case : Optional[Any] = AutoConfig.from_pretrained(model_args.model_name_or_path , cache_dir=model_args.cache_dir )
else:
__snake_case : Tuple = CONFIG_MAPPING[model_args.model_type]()
logger.warning('''You are instantiating a new config instance from scratch.''' )
if model_args.tokenizer_name:
__snake_case : Dict = AutoTokenizer.from_pretrained(model_args.tokenizer_name , cache_dir=model_args.cache_dir )
elif model_args.model_name_or_path:
__snake_case : List[Any] = AutoTokenizer.from_pretrained(model_args.model_name_or_path , cache_dir=model_args.cache_dir )
else:
raise ValueError(
'''You are instantiating a new tokenizer from scratch. This is not supported, but you can do it from another'''
            ''' script, save it, and load it from here, using --tokenizer_name''' )
if model_args.model_name_or_path:
__snake_case : int = AutoModelWithLMHead.from_pretrained(
model_args.model_name_or_path , from_tf=bool('''.ckpt''' in model_args.model_name_or_path ) , config=_snake_case , cache_dir=model_args.cache_dir , )
else:
logger.info('''Training new model from scratch''' )
__snake_case : List[Any] = AutoModelWithLMHead.from_config(_snake_case )
model.resize_token_embeddings(len(_snake_case ) )
if config.model_type in ["bert", "roberta", "distilbert", "camembert"] and not data_args.mlm:
raise ValueError(
            '''BERT and RoBERTa-like models do not have LM heads but masked LM heads. They must be run using the '''
'''--mlm flag (masked language modeling).''' )
if data_args.block_size <= 0:
__snake_case : List[str] = tokenizer.max_len
# Our input block size will be the max possible for the model
else:
__snake_case : Optional[int] = min(data_args.block_size , tokenizer.max_len )
# Get datasets
__snake_case : Optional[Any] = (
get_dataset(_snake_case , tokenizer=_snake_case , cache_dir=model_args.cache_dir ) if training_args.do_train else None
)
__snake_case : Any = (
get_dataset(_snake_case , tokenizer=_snake_case , evaluate=_snake_case , cache_dir=model_args.cache_dir )
if training_args.do_eval
else None
)
if config.model_type == "xlnet":
__snake_case : List[Any] = DataCollatorForPermutationLanguageModeling(
tokenizer=_snake_case , plm_probability=data_args.plm_probability , max_span_length=data_args.max_span_length , )
else:
if data_args.mlm and data_args.whole_word_mask:
__snake_case : Optional[Any] = DataCollatorForWholeWordMask(
tokenizer=_snake_case , mlm_probability=data_args.mlm_probability )
else:
__snake_case : Union[str, Any] = DataCollatorForLanguageModeling(
tokenizer=_snake_case , mlm=data_args.mlm , mlm_probability=data_args.mlm_probability )
# Initialize our Trainer
__snake_case : Optional[int] = Trainer(
model=_snake_case , args=_snake_case , data_collator=_snake_case , train_dataset=_snake_case , eval_dataset=_snake_case , prediction_loss_only=_snake_case , )
# Training
if training_args.do_train:
__snake_case : Dict = (
model_args.model_name_or_path
if model_args.model_name_or_path is not None and os.path.isdir(model_args.model_name_or_path )
else None
)
trainer.train(model_path=_snake_case )
trainer.save_model()
# For convenience, we also re-save the tokenizer to the same directory,
# so that you can share your model easily on huggingface.co/models =)
if trainer.is_world_master():
tokenizer.save_pretrained(training_args.output_dir )
# Evaluation
__snake_case : int = {}
if training_args.do_eval:
logger.info('''*** Evaluate ***''' )
__snake_case : Dict = trainer.evaluate()
__snake_case : Dict = math.exp(eval_output['''eval_loss'''] )
__snake_case : List[Any] = {'''perplexity''': perplexity}
__snake_case : str = os.path.join(training_args.output_dir , '''eval_results_lm.txt''' )
if trainer.is_world_master():
with open(_snake_case , '''w''' ) as writer:
logger.info('''***** Eval results *****''' )
for key in sorted(result.keys() ):
logger.info(''' %s = %s''' , _snake_case , str(result[key] ) )
writer.write('''%s = %s\n''' % (key, str(result[key] )) )
results.update(_snake_case )
return results
def lowercase ( _snake_case : Optional[int] ) ->Tuple:
"""simple docstring"""
main()
if __name__ == "__main__":
main()
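    # The perplexity reported above is just the exponential of the average
    # per-token cross-entropy loss: an eval loss of 3.0 nats corresponds to a
    # perplexity of about 20.09.
    print(math.exp(3.0))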
| 102 |
"""simple docstring"""
def __lowercase ( _a , _a , _a=False ):
if isinstance(_a , _a ) and isinstance(_a , _a ):
snake_case_ : Union[str, Any] = len(set_a.intersection(_a ) )
if alternative_union:
snake_case_ : Any = len(_a ) + len(_a )
else:
snake_case_ : str = len(set_a.union(_a ) )
return intersection / union
if isinstance(_a , (list, tuple) ) and isinstance(_a , (list, tuple) ):
snake_case_ : str = [element for element in set_a if element in set_b]
if alternative_union:
snake_case_ : Tuple = len(_a ) + len(_a )
return len(_a ) / union
else:
snake_case_ : List[Any] = set_a + [element for element in set_b if element not in set_a]
return len(_a ) / len(_a )
return None
if __name__ == "__main__":
lowercase__ : Any = {'''a''', '''b''', '''c''', '''d''', '''e'''}
lowercase__ : Optional[Any] = {'''c''', '''d''', '''e''', '''f''', '''h''', '''i'''}
print(jaccard_similarity(set_a, set_b))
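    # Worked check: the intersection is {c, d, e} (3 elements) and the union has
    # 8, so the printed value is 3/8 = 0.375; with the alternative union the
    # denominator is len(set_a) + len(set_b) = 11.
    assert jaccard_similarity(set_a, set_b) == 3 / 8
    assert jaccard_similarity(set_a, set_b, True) == 3 / 11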
| 264 | 0 |
from collections.abc import Sequence
def UpperCamelCase( __UpperCamelCase : Sequence[float] ,__UpperCamelCase : bool = False ):
if not arr:
return 0
lowerCAmelCase_ : Tuple = 0 if allow_empty_subarrays else float('''-inf''' )
lowerCAmelCase_ : Optional[Any] = 0.0
for num in arr:
lowerCAmelCase_ : Union[str, Any] = max(0 if allow_empty_subarrays else num ,curr_sum + num )
lowerCAmelCase_ : List[Any] = max(__UpperCamelCase ,__UpperCamelCase )
return max_sum
if __name__ == "__main__":
from doctest import testmod
testmod()
A__ : Optional[int] = [-2, 1, -3, 4, -1, 2, 1, -5, 4]
print(F'''{max_subarray_sum(nums) = }''')
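    # Worked cases: the scan above peaks on the subarray [4, -1, 2, 1], so the
    # printed maximum is 6; for an all-negative array the best non-empty
    # subarray is the largest single element unless empty subarrays are allowed.
    assert max_subarray_sum([-2, 1, -3, 4, -1, 2, 1, -5, 4]) == 6
    assert max_subarray_sum([-5, -2]) == -2
    assert max_subarray_sum([-5, -2], True) == 0  # allow_empty_subarrays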
| 103 |
"""simple docstring"""
import os
from datetime import datetime as dt
from github import Github
lowercase__ : int = [
'''good first issue''',
'''good second issue''',
'''good difficult issue''',
'''enhancement''',
'''new pipeline/model''',
'''new scheduler''',
'''wip''',
]
def __lowercase ( ):
snake_case_ : Optional[Any] = Github(os.environ['''GITHUB_TOKEN'''] )
snake_case_ : Any = g.get_repo('''huggingface/diffusers''' )
snake_case_ : Any = repo.get_issues(state='''open''' )
for issue in open_issues:
snake_case_ : str = sorted(issue.get_comments() , key=lambda _a : i.created_at , reverse=_a )
snake_case_ : Dict = comments[0] if len(_a ) > 0 else None
if (
last_comment is not None
and last_comment.user.login == "github-actions[bot]"
and (dt.utcnow() - issue.updated_at).days > 7
and (dt.utcnow() - issue.created_at).days >= 30
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
):
# Closes the issue after 7 days of inactivity since the Stalebot notification.
issue.edit(state='''closed''' )
elif (
"stale" in issue.get_labels()
and last_comment is not None
and last_comment.user.login != "github-actions[bot]"
):
# Opens the issue if someone other than Stalebot commented.
issue.edit(state='''open''' )
issue.remove_from_labels('''stale''' )
elif (
(dt.utcnow() - issue.updated_at).days > 23
and (dt.utcnow() - issue.created_at).days >= 30
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
):
# Post a Stalebot notification after 23 days of inactivity.
issue.create_comment(
'''This issue has been automatically marked as stale because it has not had '''
'''recent activity. If you think this still needs to be addressed '''
'''please comment on this thread.\n\nPlease note that issues that do not follow the '''
'''[contributing guidelines](https://github.com/huggingface/diffusers/blob/main/CONTRIBUTING.md) '''
'''are likely to be ignored.''' )
issue.add_to_labels('''stale''' )
if __name__ == "__main__":
main()
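    # The staleness rules above reduce to day arithmetic on the issue
    # timestamps: an issue created 40 days ago and last touched 25 days ago
    # qualifies for the stale notification (25 > 23 and 40 >= 30).
    from datetime import timedelta

    _now = dt.utcnow()
    _updated, _created = _now - timedelta(days=25), _now - timedelta(days=40)
    print((_now - _updated).days > 23 and (_now - _created).days >= 30)  # True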
| 264 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
lowerCAmelCase__ = {'''configuration_glpn''': ['''GLPN_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''GLPNConfig''']}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase__ = ['''GLPNFeatureExtractor''']
lowerCAmelCase__ = ['''GLPNImageProcessor''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase__ = [
'''GLPN_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''GLPNForDepthEstimation''',
'''GLPNLayer''',
'''GLPNModel''',
'''GLPNPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_glpn import GLPN_PRETRAINED_CONFIG_ARCHIVE_MAP, GLPNConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_glpn import GLPNFeatureExtractor
from .image_processing_glpn import GLPNImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_glpn import (
GLPN_PRETRAINED_MODEL_ARCHIVE_LIST,
GLPNForDepthEstimation,
GLPNLayer,
GLPNModel,
GLPNPreTrainedModel,
)
else:
import sys
lowerCAmelCase__ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 104 |
"""simple docstring"""
import argparse
import json
import numpy
import torch
from transformers.models.xlm.tokenization_xlm import VOCAB_FILES_NAMES
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging
logging.set_verbosity_info()
def __lowercase ( _a , _a ):
# Load checkpoint
snake_case_ : Optional[Any] = torch.load(_a , map_location='''cpu''' )
snake_case_ : Union[str, Any] = chkpt['''model''']
# We have the base model one level deeper than the original XLM repository
snake_case_ : Dict = {}
for k, v in state_dict.items():
if "pred_layer" in k:
snake_case_ : Union[str, Any] = v
else:
snake_case_ : Dict = v
snake_case_ : Union[str, Any] = chkpt['''params''']
snake_case_ : int = {n: v for n, v in config.items() if not isinstance(_a , (torch.FloatTensor, numpy.ndarray) )}
snake_case_ : int = chkpt['''dico_word2id''']
snake_case_ : str = {s + '''</w>''' if s.find('''@@''' ) == -1 and i > 13 else s.replace('''@@''' , '''''' ): i for s, i in vocab.items()}
# Save pytorch-model
snake_case_ : Union[str, Any] = pytorch_dump_folder_path + '''/''' + WEIGHTS_NAME
snake_case_ : Union[str, Any] = pytorch_dump_folder_path + '''/''' + CONFIG_NAME
snake_case_ : Any = pytorch_dump_folder_path + '''/''' + VOCAB_FILES_NAMES['''vocab_file''']
print(f"Save PyTorch model to {pytorch_weights_dump_path}" )
torch.save(_a , _a )
print(f"Save configuration file to {pytorch_config_dump_path}" )
with open(_a , '''w''' , encoding='''utf-8''' ) as f:
f.write(json.dumps(_a , indent=2 ) + '''\n''' )
print(f"Save vocab file to {pytorch_config_dump_path}" )
with open(_a , '''w''' , encoding='''utf-8''' ) as f:
f.write(json.dumps(_a , indent=2 ) + '''\n''' )
if __name__ == "__main__":
lowercase__ : Optional[Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--xlm_checkpoint_path''', default=None, type=str, required=True, help='''Path the official PyTorch dump.'''
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
lowercase__ : List[str] = parser.parse_args()
convert_xlm_checkpoint_to_pytorch(args.xlm_checkpoint_path, args.pytorch_dump_folder_path)
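    # In miniature, the vocab conversion above: fairseq BPE continuation tokens
    # ("@@" suffix) lose the marker, and word-final tokens past the first 14
    # special entries gain a "</w>" word-end marker.
    _toy_vocab = {"<s>": 0, "hel@@": 20, "lo": 21}
    print({s + "</w>" if s.find("@@") == -1 and i > 13 else s.replace("@@", ""): i for s, i in _toy_vocab.items()})
    # -> {'<s>': 0, 'hel': 20, 'lo</w>': 21}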
| 264 | 0 |
"""simple docstring"""
import os
from typing import BinaryIO, Optional, Union
import numpy as np
import pyarrow.parquet as pq
from .. import Audio, Dataset, Features, Image, NamedSplit, Value, config
from ..features.features import FeatureType, _visit
from ..formatting import query_table
from ..packaged_modules import _PACKAGED_DATASETS_MODULES
from ..packaged_modules.parquet.parquet import Parquet
from ..utils import logging
from ..utils.typing import NestedDataStructureLike, PathLike
from .abc import AbstractDatasetReader
def _SCREAMING_SNAKE_CASE ( _lowercase : Features ) ->Optional[int]:
'''simple docstring'''
a : str = np.inf
def set_batch_size(_lowercase : FeatureType ) -> None:
nonlocal batch_size
if isinstance(_lowercase , _lowercase ):
a : Dict = min(_lowercase , config.PARQUET_ROW_GROUP_SIZE_FOR_IMAGE_DATASETS )
elif isinstance(_lowercase , _lowercase ):
a : Tuple = min(_lowercase , config.PARQUET_ROW_GROUP_SIZE_FOR_AUDIO_DATASETS )
elif isinstance(_lowercase , _lowercase ) and feature.dtype == "binary":
a : Optional[int] = min(_lowercase , config.PARQUET_ROW_GROUP_SIZE_FOR_BINARY_DATASETS )
_visit(_lowercase , _lowercase )
return None if batch_size is np.inf else batch_size
class __UpperCamelCase ( a__ ):
def __init__( self , lowerCAmelCase__ , lowerCAmelCase__ = None , lowerCAmelCase__ = None , lowerCAmelCase__ = None , lowerCAmelCase__ = False , lowerCAmelCase__ = False , lowerCAmelCase__ = None , **lowerCAmelCase__ , ) -> List[str]:
super().__init__(
lowerCAmelCase__ , split=lowerCAmelCase__ , features=lowerCAmelCase__ , cache_dir=lowerCAmelCase__ , keep_in_memory=lowerCAmelCase__ , streaming=lowerCAmelCase__ , num_proc=lowerCAmelCase__ , **lowerCAmelCase__ , )
a : Union[str, Any] = path_or_paths if isinstance(lowerCAmelCase__ , lowerCAmelCase__ ) else {self.split: path_or_paths}
a : Dict = _PACKAGED_DATASETS_MODULES["parquet"][1]
a : str = Parquet(
cache_dir=lowerCAmelCase__ , data_files=lowerCAmelCase__ , features=lowerCAmelCase__ , hash=lowerCAmelCase__ , **lowerCAmelCase__ , )
def __a ( self ) -> Any:
# Build iterable dataset
if self.streaming:
a : List[Any] = self.builder.as_streaming_dataset(split=self.split )
# Build regular (map-style) dataset
else:
a : int = None
a : Any = None
a : Optional[int] = None
a : Optional[Any] = None
self.builder.download_and_prepare(
download_config=lowerCAmelCase__ , download_mode=lowerCAmelCase__ , verification_mode=lowerCAmelCase__ , base_path=lowerCAmelCase__ , num_proc=self.num_proc , )
a : List[Any] = self.builder.as_dataset(
split=self.split , verification_mode=lowerCAmelCase__ , in_memory=self.keep_in_memory )
return dataset
class __UpperCamelCase :
def __init__( self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ = None , **lowerCAmelCase__ , ) -> Any:
a : Tuple = dataset
a : int = path_or_buf
a : List[str] = batch_size or get_writer_batch_size(dataset.features )
a : Any = parquet_writer_kwargs
def __a ( self ) -> int:
a : Dict = self.batch_size if self.batch_size else config.DEFAULT_MAX_BATCH_SIZE
if isinstance(self.path_or_buf , (str, bytes, os.PathLike) ):
with open(self.path_or_buf , "wb+" ) as buffer:
a : Any = self._write(file_obj=lowerCAmelCase__ , batch_size=lowerCAmelCase__ , **self.parquet_writer_kwargs )
else:
a : Optional[Any] = self._write(file_obj=self.path_or_buf , batch_size=lowerCAmelCase__ , **self.parquet_writer_kwargs )
return written
def __a ( self , lowerCAmelCase__ , lowerCAmelCase__ , **lowerCAmelCase__ ) -> int:
a : Optional[int] = 0
a : Union[str, Any] = parquet_writer_kwargs.pop("path_or_buf" , lowerCAmelCase__ )
a : Any = self.dataset.features.arrow_schema
a : Optional[Any] = pq.ParquetWriter(lowerCAmelCase__ , schema=lowerCAmelCase__ , **lowerCAmelCase__ )
for offset in logging.tqdm(
range(0 , len(self.dataset ) , lowerCAmelCase__ ) , unit="ba" , disable=not logging.is_progress_bar_enabled() , desc="Creating parquet from Arrow format" , ):
a : Dict = query_table(
table=self.dataset._data , key=slice(lowerCAmelCase__ , offset + batch_size ) , indices=self.dataset._indices if self.dataset._indices is not None else None , )
writer.write_table(lowerCAmelCase__ )
written += batch.nbytes
writer.close()
return written
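# The public entry points that drive the reader and writer classes above are
# Dataset.from_parquet and Dataset.to_parquet; a minimal round trip (the output
# path is a placeholder):
_ds = Dataset.from_dict({"text": ["a", "b"], "label": [0, 1]})
_ds.to_parquet("out.parquet")  # serializes in row-group-sized batches via the writer
print(Dataset.from_parquet("out.parquet")[0])  # {'text': 'a', 'label': 0}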
| 105 |
"""simple docstring"""
from . import __version__
# Backward compatibility imports, to make sure all those objects can be found in file_utils
from .utils import (
CLOUDFRONT_DISTRIB_PREFIX,
CONFIG_NAME,
DISABLE_TELEMETRY,
DUMMY_INPUTS,
DUMMY_MASK,
ENV_VARS_TRUE_AND_AUTO_VALUES,
ENV_VARS_TRUE_VALUES,
FEATURE_EXTRACTOR_NAME,
FLAX_WEIGHTS_NAME,
HF_MODULES_CACHE,
HUGGINGFACE_CO_PREFIX,
HUGGINGFACE_CO_RESOLVE_ENDPOINT,
MODEL_CARD_NAME,
MULTIPLE_CHOICE_DUMMY_INPUTS,
PYTORCH_PRETRAINED_BERT_CACHE,
PYTORCH_TRANSFORMERS_CACHE,
S3_BUCKET_PREFIX,
SENTENCEPIECE_UNDERLINE,
SPIECE_UNDERLINE,
TF2_WEIGHTS_NAME,
TF_WEIGHTS_NAME,
TORCH_FX_REQUIRED_VERSION,
TRANSFORMERS_CACHE,
TRANSFORMERS_DYNAMIC_MODULE_NAME,
USE_JAX,
USE_TF,
USE_TORCH,
WEIGHTS_INDEX_NAME,
WEIGHTS_NAME,
ContextManagers,
DummyObject,
EntryNotFoundError,
ExplicitEnum,
ModelOutput,
PaddingStrategy,
PushToHubMixin,
RepositoryNotFoundError,
RevisionNotFoundError,
TensorType,
_LazyModule,
add_code_sample_docstrings,
add_end_docstrings,
add_start_docstrings,
add_start_docstrings_to_model_forward,
cached_property,
copy_func,
default_cache_path,
define_sagemaker_information,
get_cached_models,
get_file_from_repo,
get_full_repo_name,
get_torch_version,
has_file,
http_user_agent,
is_apex_available,
is_bsa_available,
is_coloredlogs_available,
is_datasets_available,
is_detectrona_available,
is_faiss_available,
is_flax_available,
is_ftfy_available,
is_in_notebook,
is_ipex_available,
is_librosa_available,
is_offline_mode,
is_onnx_available,
is_pandas_available,
is_phonemizer_available,
is_protobuf_available,
is_psutil_available,
is_pyanvml_available,
is_pyctcdecode_available,
is_pytesseract_available,
is_pytorch_quantization_available,
is_rjieba_available,
is_sagemaker_dp_enabled,
is_sagemaker_mp_enabled,
is_scipy_available,
is_sentencepiece_available,
is_seqio_available,
is_sklearn_available,
is_soundfile_availble,
is_spacy_available,
is_speech_available,
is_tensor,
is_tensorflow_probability_available,
is_tfaonnx_available,
is_tf_available,
is_timm_available,
is_tokenizers_available,
is_torch_available,
is_torch_bfaa_available,
is_torch_cuda_available,
is_torch_fx_available,
is_torch_fx_proxy,
is_torch_mps_available,
is_torch_tfaa_available,
is_torch_tpu_available,
is_torchaudio_available,
is_training_run_on_sagemaker,
is_vision_available,
replace_return_docstrings,
requires_backends,
to_numpy,
to_py_obj,
torch_only_method,
)
| 264 | 0 |
"""simple docstring"""
import torch
from torch import nn
from torch.nn import CrossEntropyLoss, MSELoss
from transformers.file_utils import add_start_docstrings, add_start_docstrings_to_model_forward
from transformers.models.bert.modeling_bert import (
BERT_INPUTS_DOCSTRING,
BERT_START_DOCSTRING,
BertEmbeddings,
BertLayer,
BertPooler,
BertPreTrainedModel,
)
def __SCREAMING_SNAKE_CASE ( A_ ):
lowerCAmelCase__ : List[str] = torch.exp(A_ )
    lowerCAmelCase__ : Tuple = torch.sum(exp_x , dim=1 ) # sum of exp(x_i)
    lowerCAmelCase__ : Any = torch.sum(A_ * exp_x , dim=1 ) # sum of x_i * exp(x_i)
    return torch.log(A ) - B / A
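# Numerical check of the identity used above: for p = softmax(x) and
# Z = sum_i exp(x_i), the Shannon entropy -sum_i p_i * log(p_i) equals
# log(Z) - (sum_i x_i * exp(x_i)) / Z.
_logits = torch.randn(2, 5)
_p = torch.softmax(_logits, dim=1)
_Z = torch.exp(_logits).sum(dim=1)
_trick = torch.log(_Z) - (_logits * torch.exp(_logits)).sum(dim=1) / _Z
assert torch.allclose(-(_p * _p.log()).sum(dim=1), _trick, atol=1e-5)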
class SCREAMING_SNAKE_CASE ( nn.Module ):
"""simple docstring"""
def __init__( self : str ,lowercase_ : Optional[int] ):
super().__init__()
lowerCAmelCase__ : Tuple = config.output_attentions
lowerCAmelCase__ : List[str] = config.output_hidden_states
lowerCAmelCase__ : Union[str, Any] = nn.ModuleList([BertLayer(lowercase_ ) for _ in range(config.num_hidden_layers )] )
lowerCAmelCase__ : int = nn.ModuleList([BertHighway(lowercase_ ) for _ in range(config.num_hidden_layers )] )
lowerCAmelCase__ : Any = [-1 for _ in range(config.num_hidden_layers )]
def __lowerCAmelCase ( self : List[str] ,lowercase_ : List[Any] ):
if (type(lowercase_ ) is float) or (type(lowercase_ ) is int):
for i in range(len(self.early_exit_entropy ) ):
lowerCAmelCase__ : List[str] = x
else:
lowerCAmelCase__ : Optional[Any] = x
def __lowerCAmelCase ( self : Any ,lowercase_ : int ):
lowerCAmelCase__ : Any = pooler.state_dict()
for highway in self.highway:
for name, param in highway.pooler.state_dict().items():
param.copy_(loaded_model[name] )
def __lowerCAmelCase ( self : str ,lowercase_ : Optional[int] ,lowercase_ : Union[str, Any]=None ,lowercase_ : Any=None ,lowercase_ : Union[str, Any]=None ,lowercase_ : Optional[Any]=None ,):
lowerCAmelCase__ : Tuple = ()
lowerCAmelCase__ : Optional[int] = ()
lowerCAmelCase__ : str = ()
for i, layer_module in enumerate(self.layer ):
if self.output_hidden_states:
lowerCAmelCase__ : Dict = all_hidden_states + (hidden_states,)
lowerCAmelCase__ : str = layer_module(
lowercase_ ,lowercase_ ,head_mask[i] ,lowercase_ ,lowercase_ )
lowerCAmelCase__ : int = layer_outputs[0]
if self.output_attentions:
lowerCAmelCase__ : Optional[int] = all_attentions + (layer_outputs[1],)
lowerCAmelCase__ : str = (hidden_states,)
if self.output_hidden_states:
lowerCAmelCase__ : Tuple = current_outputs + (all_hidden_states,)
if self.output_attentions:
lowerCAmelCase__ : Any = current_outputs + (all_attentions,)
lowerCAmelCase__ : Dict = self.highway[i](lowercase_ )
# logits, pooled_output
if not self.training:
lowerCAmelCase__ : str = highway_exit[0]
lowerCAmelCase__ : str = entropy(lowercase_ )
lowerCAmelCase__ : str = highway_exit + (highway_entropy,) # logits, hidden_states(?), entropy
lowerCAmelCase__ : Dict = all_highway_exits + (highway_exit,)
if highway_entropy < self.early_exit_entropy[i]:
lowerCAmelCase__ : str = (highway_logits,) + current_outputs[1:] + (all_highway_exits,)
raise HighwayException(lowercase_ ,i + 1 )
else:
lowerCAmelCase__ : Union[str, Any] = all_highway_exits + (highway_exit,)
# Add last layer
if self.output_hidden_states:
lowerCAmelCase__ : List[Any] = all_hidden_states + (hidden_states,)
lowerCAmelCase__ : Dict = (hidden_states,)
if self.output_hidden_states:
lowerCAmelCase__ : List[Any] = outputs + (all_hidden_states,)
if self.output_attentions:
lowerCAmelCase__ : List[Any] = outputs + (all_attentions,)
lowerCAmelCase__ : List[Any] = outputs + (all_highway_exits,)
return outputs # last-layer hidden state, (all hidden states), (all attentions), all highway exits
@add_start_docstrings(
"The Bert Model transformer with early exiting (DeeBERT). " , a_ , )
class SCREAMING_SNAKE_CASE ( a_ ):
"""simple docstring"""
def __init__( self : List[str] ,lowercase_ : Tuple ):
super().__init__(lowercase_ )
lowerCAmelCase__ : List[Any] = config
lowerCAmelCase__ : int = BertEmbeddings(lowercase_ )
lowerCAmelCase__ : Union[str, Any] = DeeBertEncoder(lowercase_ )
lowerCAmelCase__ : Union[str, Any] = BertPooler(lowercase_ )
self.init_weights()
def __lowerCAmelCase ( self : str ):
self.encoder.init_highway_pooler(self.pooler )
def __lowerCAmelCase ( self : Tuple ):
return self.embeddings.word_embeddings
def __lowerCAmelCase ( self : List[str] ,lowercase_ : Optional[Any] ):
lowerCAmelCase__ : Any = value
def __lowerCAmelCase ( self : str ,lowercase_ : Tuple ):
for layer, heads in heads_to_prune.items():
self.encoder.layer[layer].attention.prune_heads(lowercase_ )
@add_start_docstrings_to_model_forward(lowercase_ )
def __lowerCAmelCase ( self : Tuple ,lowercase_ : int=None ,lowercase_ : int=None ,lowercase_ : Tuple=None ,lowercase_ : Dict=None ,lowercase_ : Dict=None ,lowercase_ : Optional[int]=None ,lowercase_ : Optional[int]=None ,lowercase_ : str=None ,):
if input_ids is not None and inputs_embeds is not None:
raise ValueError('''You cannot specify both input_ids and inputs_embeds at the same time''' )
elif input_ids is not None:
lowerCAmelCase__ : List[str] = input_ids.size()
elif inputs_embeds is not None:
lowerCAmelCase__ : Optional[Any] = inputs_embeds.size()[:-1]
else:
raise ValueError('''You have to specify either input_ids or inputs_embeds''' )
lowerCAmelCase__ : Union[str, Any] = input_ids.device if input_ids is not None else inputs_embeds.device
if attention_mask is None:
lowerCAmelCase__ : Union[str, Any] = torch.ones(lowercase_ ,device=lowercase_ )
if encoder_attention_mask is None:
lowerCAmelCase__ : Tuple = torch.ones(lowercase_ ,device=lowercase_ )
if token_type_ids is None:
lowerCAmelCase__ : int = torch.zeros(lowercase_ ,dtype=torch.long ,device=lowercase_ )
# We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length]
# ourselves in which case we just need to make it broadcastable to all heads.
lowerCAmelCase__ : torch.Tensor = self.get_extended_attention_mask(lowercase_ ,lowercase_ ,lowercase_ )
        # If a 2D or 3D attention mask is provided for the cross-attention,
        # we need to make it broadcastable to [batch_size, num_heads, seq_length, seq_length]
if encoder_attention_mask.dim() == 3:
lowerCAmelCase__ : Union[str, Any] = encoder_attention_mask[:, None, :, :]
if encoder_attention_mask.dim() == 2:
lowerCAmelCase__ : Dict = encoder_attention_mask[:, None, None, :]
lowerCAmelCase__ : List[Any] = encoder_extended_attention_mask.to(
dtype=next(self.parameters() ).dtype ) # fp16 compatibility
lowerCAmelCase__ : Any = (1.0 - encoder_extended_attention_mask) * -1_0000.0
# Prepare head mask if needed
# 1.0 in head_mask indicate we keep the head
# attention_probs has shape bsz x n_heads x N x N
# input head_mask has shape [num_heads] or [num_hidden_layers x num_heads]
# and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length]
lowerCAmelCase__ : Tuple = self.get_head_mask(lowercase_ ,self.config.num_hidden_layers )
lowerCAmelCase__ : Any = self.embeddings(
input_ids=lowercase_ ,position_ids=lowercase_ ,token_type_ids=lowercase_ ,inputs_embeds=lowercase_ )
lowerCAmelCase__ : Tuple = self.encoder(
lowercase_ ,attention_mask=lowercase_ ,head_mask=lowercase_ ,encoder_hidden_states=lowercase_ ,encoder_attention_mask=lowercase_ ,)
lowerCAmelCase__ : str = encoder_outputs[0]
lowerCAmelCase__ : Dict = self.pooler(lowercase_ )
lowerCAmelCase__ : Any = (
sequence_output,
pooled_output,
) + encoder_outputs[
1:
] # add hidden_states and attentions if they are here
return outputs # sequence_output, pooled_output, (hidden_states), (attentions), highway exits
class SCREAMING_SNAKE_CASE ( a_ ):
"""simple docstring"""
def __init__( self : Tuple ,lowercase_ : Any ,lowercase_ : int ):
lowerCAmelCase__ : List[Any] = message
lowerCAmelCase__ : Any = exit_layer # start from 1!
class SCREAMING_SNAKE_CASE ( nn.Module ):
"""simple docstring"""
def __init__( self : Optional[Any] ,lowercase_ : Any ):
super().__init__()
lowerCAmelCase__ : Optional[int] = BertPooler(lowercase_ )
lowerCAmelCase__ : Optional[int] = nn.Dropout(config.hidden_dropout_prob )
lowerCAmelCase__ : str = nn.Linear(config.hidden_size ,config.num_labels )
def __lowerCAmelCase ( self : Tuple ,lowercase_ : int ):
# Pooler
lowerCAmelCase__ : Tuple = encoder_outputs[0]
lowerCAmelCase__ : Optional[int] = self.pooler(lowercase_ )
# "return" pooler_output
# BertModel
lowerCAmelCase__ : List[str] = (pooler_input, pooler_output) + encoder_outputs[1:]
# "return" bmodel_output
# Dropout and classification
lowerCAmelCase__ : List[str] = bmodel_output[1]
lowerCAmelCase__ : str = self.dropout(lowercase_ )
lowerCAmelCase__ : List[str] = self.classifier(lowercase_ )
return logits, pooled_output
@add_start_docstrings(
"Bert Model (with early exiting - DeeBERT) with a classifier on top,\n also takes care of multi-layer training. " , a_ , )
class SCREAMING_SNAKE_CASE ( a_ ):
"""simple docstring"""
def __init__( self : Optional[Any] ,lowercase_ : str ):
super().__init__(lowercase_ )
lowerCAmelCase__ : Union[str, Any] = config.num_labels
lowerCAmelCase__ : Optional[int] = config.num_hidden_layers
lowerCAmelCase__ : Any = DeeBertModel(lowercase_ )
lowerCAmelCase__ : str = nn.Dropout(config.hidden_dropout_prob )
lowerCAmelCase__ : Union[str, Any] = nn.Linear(config.hidden_size ,self.config.num_labels )
self.init_weights()
@add_start_docstrings_to_model_forward(lowercase_ )
def __lowerCAmelCase ( self : Any ,lowercase_ : Tuple=None ,lowercase_ : Optional[int]=None ,lowercase_ : List[Any]=None ,lowercase_ : int=None ,lowercase_ : Tuple=None ,lowercase_ : Optional[Any]=None ,lowercase_ : Optional[int]=None ,lowercase_ : Dict=-1 ,lowercase_ : Dict=False ,):
lowerCAmelCase__ : Tuple = self.num_layers
try:
lowerCAmelCase__ : Dict = self.bert(
lowercase_ ,attention_mask=lowercase_ ,token_type_ids=lowercase_ ,position_ids=lowercase_ ,head_mask=lowercase_ ,inputs_embeds=lowercase_ ,)
# sequence_output, pooled_output, (hidden_states), (attentions), highway exits
lowerCAmelCase__ : Any = outputs[1]
lowerCAmelCase__ : str = self.dropout(lowercase_ )
lowerCAmelCase__ : List[Any] = self.classifier(lowercase_ )
lowerCAmelCase__ : Dict = (logits,) + outputs[2:] # add hidden states and attention if they are here
except HighwayException as e:
lowerCAmelCase__ : Dict = e.message
lowerCAmelCase__ : Dict = e.exit_layer
lowerCAmelCase__ : int = outputs[0]
if not self.training:
lowerCAmelCase__ : Union[str, Any] = entropy(lowercase_ )
lowerCAmelCase__ : str = []
lowerCAmelCase__ : Optional[int] = []
if labels is not None:
if self.num_labels == 1:
# We are doing regression
lowerCAmelCase__ : Optional[Any] = MSELoss()
lowerCAmelCase__ : str = loss_fct(logits.view(-1 ) ,labels.view(-1 ) )
else:
lowerCAmelCase__ : int = CrossEntropyLoss()
lowerCAmelCase__ : Union[str, Any] = loss_fct(logits.view(-1 ,self.num_labels ) ,labels.view(-1 ) )
# work with highway exits
lowerCAmelCase__ : Union[str, Any] = []
for highway_exit in outputs[-1]:
lowerCAmelCase__ : List[Any] = highway_exit[0]
if not self.training:
highway_logits_all.append(lowercase_ )
highway_entropy.append(highway_exit[2] )
if self.num_labels == 1:
# We are doing regression
lowerCAmelCase__ : int = MSELoss()
lowerCAmelCase__ : str = loss_fct(highway_logits.view(-1 ) ,labels.view(-1 ) )
else:
lowerCAmelCase__ : Optional[int] = CrossEntropyLoss()
lowerCAmelCase__ : Optional[int] = loss_fct(highway_logits.view(-1 ,self.num_labels ) ,labels.view(-1 ) )
highway_losses.append(lowercase_ )
if train_highway:
lowerCAmelCase__ : Optional[int] = (sum(highway_losses[:-1] ),) + outputs
# exclude the final highway, of course
else:
lowerCAmelCase__ : Any = (loss,) + outputs
if not self.training:
lowerCAmelCase__ : Union[str, Any] = outputs + ((original_entropy, highway_entropy), exit_layer)
if output_layer >= 0:
lowerCAmelCase__ : Optional[Any] = (
(outputs[0],) + (highway_logits_all[output_layer],) + outputs[2:]
) # use the highway of the last layer
return outputs # (loss), logits, (hidden_states), (attentions), (highway_exits)
| 106 |
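# A minimal sketch of the entropy-based early-exit rule used by the DeeBERT
# encoder above, assuming `logits` is a [batch_size, num_labels] tensor. The
# highway classifier at layer i exits once prediction entropy drops below that
# layer's threshold; the helper names here are illustrative, not the library's.
import torch

def entropy(logits: torch.Tensor) -> torch.Tensor:
    # Shannon entropy of the softmax distribution over labels, batch-averaged.
    probs = torch.softmax(logits, dim=-1)
    return -(probs * torch.log(probs + 1e-12)).sum(dim=-1).mean()

def should_exit_early(logits: torch.Tensor, threshold: float) -> bool:
    # Low entropy means the intermediate classifier is already confident.
    return entropy(logits).item() < threshold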
"""simple docstring"""
import os
import tempfile
import unittest
import uuid
from pathlib import Path
from transformers.testing_utils import get_tests_dir, require_soundfile, require_torch, require_vision
from transformers.tools.agent_types import AgentAudio, AgentImage, AgentText
from transformers.utils import is_soundfile_availble, is_torch_available, is_vision_available
if is_torch_available():
import torch
if is_soundfile_availble():
import soundfile as sf
if is_vision_available():
from PIL import Image
def __lowercase ( _a="" ):
snake_case_ : List[str] = tempfile.mkdtemp()
return os.path.join(_a , str(uuid.uuida() ) + suffix )
@require_soundfile
@require_torch
class _UpperCAmelCase ( unittest.TestCase):
def _snake_case ( self : str ):
        snake_case_ : int = torch.rand(12 , dtype=torch.float64 ) - 0.5
snake_case_ : Optional[int] = AgentAudio(lowercase_ )
snake_case_ : List[str] = str(agent_type.to_string() )
# Ensure that the tensor and the agent_type's tensor are the same
self.assertTrue(torch.allclose(lowercase_ , agent_type.to_raw() , atol=1E-4 ) )
del agent_type
# Ensure the path remains even after the object deletion
self.assertTrue(os.path.exists(lowercase_ ) )
# Ensure that the file contains the same value as the original tensor
        new_audio , sr = sf.read(lowercase_ )
        self.assertTrue(torch.allclose(lowercase_ , torch.tensor(new_audio ) , atol=1E-4 ) )
def _snake_case ( self : Optional[int] ):
        snake_case_ : Any = torch.rand(12 , dtype=torch.float64 ) - 0.5
snake_case_ : List[str] = get_new_path(suffix='''.wav''' )
sf.write(lowercase_ , lowercase_ , 16000 )
snake_case_ : Tuple = AgentAudio(lowercase_ )
self.assertTrue(torch.allclose(lowercase_ , agent_type.to_raw() , atol=1E-4 ) )
self.assertEqual(agent_type.to_string() , lowercase_ )
@require_vision
@require_torch
class _UpperCAmelCase ( unittest.TestCase):
def _snake_case ( self : Tuple ):
snake_case_ : List[Any] = torch.randint(0 , 256 , (64, 64, 3) )
snake_case_ : str = AgentImage(lowercase_ )
snake_case_ : Union[str, Any] = str(agent_type.to_string() )
# Ensure that the tensor and the agent_type's tensor are the same
self.assertTrue(torch.allclose(lowercase_ , agent_type._tensor , atol=1E-4 ) )
self.assertIsInstance(agent_type.to_raw() , Image.Image )
# Ensure the path remains even after the object deletion
del agent_type
self.assertTrue(os.path.exists(lowercase_ ) )
def _snake_case ( self : str ):
snake_case_ : Any = Path(get_tests_dir('''fixtures/tests_samples/COCO''' ) ) / '''000000039769.png'''
snake_case_ : Optional[int] = Image.open(lowercase_ )
snake_case_ : Tuple = AgentImage(lowercase_ )
self.assertTrue(path.samefile(agent_type.to_string() ) )
self.assertTrue(image == agent_type.to_raw() )
# Ensure the path remains even after the object deletion
del agent_type
self.assertTrue(os.path.exists(lowercase_ ) )
def _snake_case ( self : str ):
snake_case_ : int = Path(get_tests_dir('''fixtures/tests_samples/COCO''' ) ) / '''000000039769.png'''
snake_case_ : Dict = Image.open(lowercase_ )
snake_case_ : List[str] = AgentImage(lowercase_ )
self.assertFalse(path.samefile(agent_type.to_string() ) )
self.assertTrue(image == agent_type.to_raw() )
# Ensure the path remains even after the object deletion
del agent_type
self.assertTrue(os.path.exists(lowercase_ ) )
class _UpperCAmelCase ( unittest.TestCase):
def _snake_case ( self : Any ):
snake_case_ : Tuple = '''Hey!'''
snake_case_ : Optional[Any] = AgentText(lowercase_ )
self.assertEqual(lowercase_ , agent_type.to_string() )
self.assertEqual(lowercase_ , agent_type.to_raw() )
self.assertEqual(lowercase_ , lowercase_ )
| 264 | 0 |
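# A self-contained round-trip in the spirit of the AgentAudio tests above:
# write a waveform to a temporary .wav file with soundfile, read it back, and
# check the samples survive within tolerance. Assumes only numpy and soundfile,
# and that the default 16-bit PCM quantization stays under the tests' atol.
import tempfile

import numpy as np
import soundfile as sf

waveform = np.random.rand(12) - 0.5
with tempfile.NamedTemporaryFile(suffix=".wav") as f:
    sf.write(f.name, waveform, 16000)  # 16 kHz sample rate, as in the tests
    read_back, sample_rate = sf.read(f.name)
assert sample_rate == 16000
assert np.abs(waveform - read_back).max() < 1e-4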
from __future__ import annotations
class snake_case__ :
"""simple docstring"""
    def __init__( self , data : int ) -> None:
        self.data = data
        self.left = None
        self.right = None
def display( tree : Node | None ): # In Order traversal of the tree
'''simple docstring'''
if tree:
display(tree.left )
print(tree.data )
display(tree.right )
def depth_of_tree( tree : Node | None ):
'''simple docstring'''
return 1 + max(depth_of_tree(tree.left ), depth_of_tree(tree.right ) ) if tree else 0
def is_full_binary_tree( tree : Node ):
'''simple docstring'''
if not tree:
return True
if tree.left and tree.right:
return is_full_binary_tree(tree.left ) and is_full_binary_tree(tree.right )
else:
return not tree.left and not tree.right
def main( ): # Main function for testing.
    '''simple docstring'''
    tree = Node(1 )
    tree.left = Node(2 )
    tree.right = Node(3 )
    tree.left.left = Node(4 )
    tree.left.right = Node(5 )
    tree.right.left = Node(6 )
    tree.right.right = Node(7 )
    tree.left.left.left = Node(8 )
    tree.left.left.right = Node(9 )
    print(is_full_binary_tree(tree ) )
    print(depth_of_tree(tree ) )
    print("Tree is: " )
    display(tree )
if __name__ == "__main__":
main()
| 107 |
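# A hedged companion check for the snippet above: in a full binary tree (every
# node has zero or two children) the node count always equals 2 * leaves - 1,
# which gives an independent sanity check on is_full_binary_tree. The helper
# names are illustrative additions, not part of the original snippet.
def count_nodes(tree):
    return 1 + count_nodes(tree.left) + count_nodes(tree.right) if tree else 0

def count_leaves(tree):
    if not tree:
        return 0
    if not tree.left and not tree.right:
        return 1
    return count_leaves(tree.left) + count_leaves(tree.right)

# For any full binary tree t: count_nodes(t) == 2 * count_leaves(t) - 1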
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
lowercase__ : str = {
'''configuration_x_clip''': [
'''XCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''XCLIPConfig''',
'''XCLIPTextConfig''',
'''XCLIPVisionConfig''',
],
'''processing_x_clip''': ['''XCLIPProcessor'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase__ : Tuple = [
'''XCLIP_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''XCLIPModel''',
'''XCLIPPreTrainedModel''',
'''XCLIPTextModel''',
'''XCLIPVisionModel''',
]
if TYPE_CHECKING:
from .configuration_x_clip import (
XCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
XCLIPConfig,
XCLIPTextConfig,
XCLIPVisionConfig,
)
from .processing_x_clip import XCLIPProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_x_clip import (
XCLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
XCLIPModel,
XCLIPPreTrainedModel,
XCLIPTextModel,
XCLIPVisionModel,
)
else:
import sys
lowercase__ : Tuple = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 264 | 0 |
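# A stripped-down illustration of the lazy-import pattern in the X-CLIP init
# above. The real transformers._LazyModule does more bookkeeping; this sketch
# only shows the core idea: defer submodule imports until attribute access.
import importlib
import types

class TinyLazyModule(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        # map each exported attribute to the submodule that defines it
        self._attr_to_module = {
            attr: mod for mod, attrs in import_structure.items() for attr in attrs
        }

    def __getattr__(self, attr):
        if attr not in self._attr_to_module:
            raise AttributeError(f"module {self.__name__!r} has no attribute {attr!r}")
        module = importlib.import_module("." + self._attr_to_module[attr], self.__name__)
        value = getattr(module, attr)
        setattr(self, attr, value)  # cache so later lookups skip __getattr__
        return value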
"""simple docstring"""
import sys
def matrix_chain_order( array ):
    '''simple docstring'''
    n = len(array )
    matrix = [[0 for x in range(n )] for x in range(n )]
    sol = [[0 for x in range(n )] for x in range(n )]
    for chain_length in range(2 , n ):
        for a in range(1 , n - chain_length + 1 ):
            b = a + chain_length - 1
            matrix[a][b] = sys.maxsize
            for c in range(a , b ):
                cost = (
                    matrix[a][c] + matrix[c + 1][b] + array[a - 1] * array[c] * array[b]
                )
                if cost < matrix[a][b]:
                    matrix[a][b] = cost
                    sol[a][b] = c
    return matrix, sol
def print_optimal_solution( optimal_solution , i , j ):
    '''simple docstring'''
    if i == j:
        print("A" + str(i ) , end=" " )
    else:
        print("(" , end=" " )
        print_optimal_solution(optimal_solution , i , optimal_solution[i][j] )
        print_optimal_solution(optimal_solution , optimal_solution[i][j] + 1 , j )
        print(")" , end=" " )
def main( ):
    '''simple docstring'''
    array = [30, 35, 15, 5, 10, 20, 25]
    n = len(array )
    # Size of matrix created from above array will be
    # 30*35 35*15 15*5 5*10 10*20 20*25
    matrix , optimal_solution = matrix_chain_order(array )
    print("No. of Operation required: " + str(matrix[1][n - 1] ) )
    print_optimal_solution(optimal_solution , 1 , n - 1 )
if __name__ == "__main__":
main()
| 108 |
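# A hedged cross-check for the dynamic program above: the same optimum computed
# by memoized recursion. For the classic chain [30, 35, 15, 5, 10, 20, 25] both
# approaches should report 15125 scalar multiplications.
from functools import lru_cache

def matrix_chain_min_cost(array):
    @lru_cache(maxsize=None)
    def best(i, j):  # cheapest way to multiply matrices i..j of the chain
        if i == j:
            return 0
        return min(
            best(i, k) + best(k + 1, j) + array[i - 1] * array[k] * array[j]
            for k in range(i, j)
        )
    return best(1, len(array) - 1)

assert matrix_chain_min_cost([30, 35, 15, 5, 10, 20, 25]) == 15125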
"""simple docstring"""
import pickle
import shutil
import tempfile
import unittest
from transformers import SPIECE_UNDERLINE, XLMRobertaTokenizer, XLMRobertaTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
lowercase__ : Dict = get_tests_dir('''fixtures/test_sentencepiece.model''')
@require_sentencepiece
@require_tokenizers
class _UpperCAmelCase ( lowerCAmelCase__ , unittest.TestCase):
_lowerCAmelCase : str = XLMRobertaTokenizer
_lowerCAmelCase : int = XLMRobertaTokenizerFast
_lowerCAmelCase : str = True
_lowerCAmelCase : Dict = True
def _snake_case ( self : List[Any] ):
super().setUp()
# We have a SentencePiece fixture for testing
snake_case_ : List[str] = XLMRobertaTokenizer(lowercase_ , keep_accents=lowercase_ )
tokenizer.save_pretrained(self.tmpdirname )
def _snake_case ( self : str ):
snake_case_ : List[Any] = '''<pad>'''
snake_case_ : Optional[int] = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(lowercase_ ) , lowercase_ )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(lowercase_ ) , lowercase_ )
def _snake_case ( self : Union[str, Any] ):
snake_case_ : Dict = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , '''<s>''' )
self.assertEqual(vocab_keys[1] , '''<pad>''' )
self.assertEqual(vocab_keys[-1] , '''<mask>''' )
self.assertEqual(len(lowercase_ ) , 1002 )
def _snake_case ( self : Union[str, Any] ):
self.assertEqual(self.get_tokenizer().vocab_size , 1002 )
def _snake_case ( self : Dict ):
snake_case_ : Optional[Any] = XLMRobertaTokenizer(lowercase_ , keep_accents=lowercase_ )
snake_case_ : Dict = tokenizer.tokenize('''This is a test''' )
self.assertListEqual(lowercase_ , ['''▁This''', '''▁is''', '''▁a''', '''▁t''', '''est'''] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(lowercase_ ) , [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]] , )
snake_case_ : Dict = tokenizer.tokenize('''I was born in 92000, and this is falsé.''' )
self.assertListEqual(
lowercase_ , [
SPIECE_UNDERLINE + '''I''',
SPIECE_UNDERLINE + '''was''',
SPIECE_UNDERLINE + '''b''',
'''or''',
'''n''',
SPIECE_UNDERLINE + '''in''',
SPIECE_UNDERLINE + '''''',
'''9''',
'''2''',
'''0''',
'''0''',
'''0''',
''',''',
SPIECE_UNDERLINE + '''and''',
SPIECE_UNDERLINE + '''this''',
SPIECE_UNDERLINE + '''is''',
SPIECE_UNDERLINE + '''f''',
'''al''',
'''s''',
'''é''',
'''.''',
] , )
snake_case_ : List[Any] = tokenizer.convert_tokens_to_ids(lowercase_ )
self.assertListEqual(
lowercase_ , [
value + tokenizer.fairseq_offset
for value in [8, 21, 84, 55, 24, 19, 7, 2, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 2, 4]
# ^ unk: 2 + 1 = 3 unk: 2 + 1 = 3 ^
] , )
snake_case_ : List[str] = tokenizer.convert_ids_to_tokens(lowercase_ )
self.assertListEqual(
lowercase_ , [
SPIECE_UNDERLINE + '''I''',
SPIECE_UNDERLINE + '''was''',
SPIECE_UNDERLINE + '''b''',
'''or''',
'''n''',
SPIECE_UNDERLINE + '''in''',
SPIECE_UNDERLINE + '''''',
'''<unk>''',
'''2''',
'''0''',
'''0''',
'''0''',
''',''',
SPIECE_UNDERLINE + '''and''',
SPIECE_UNDERLINE + '''this''',
SPIECE_UNDERLINE + '''is''',
SPIECE_UNDERLINE + '''f''',
'''al''',
'''s''',
'''<unk>''',
'''.''',
] , )
def _snake_case ( self : List[str] ):
if not self.test_slow_tokenizer:
# as we don't have a slow version, we can't compare the outputs between slow and fast versions
return
snake_case_ : int = (self.rust_tokenizer_class, '''hf-internal-testing/tiny-xlm-roberta''', {})
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})" ):
snake_case_ : Union[str, Any] = self.rust_tokenizer_class.from_pretrained(lowercase_ , **lowercase_ )
snake_case_ : int = self.tokenizer_class.from_pretrained(lowercase_ , **lowercase_ )
snake_case_ : Optional[Any] = tempfile.mkdtemp()
snake_case_ : Tuple = tokenizer_r.save_pretrained(lowercase_ )
snake_case_ : List[str] = tokenizer_p.save_pretrained(lowercase_ )
# Checks it save with the same files + the tokenizer.json file for the fast one
self.assertTrue(any('''tokenizer.json''' in f for f in tokenizer_r_files ) )
snake_case_ : str = tuple(f for f in tokenizer_r_files if '''tokenizer.json''' not in f )
self.assertSequenceEqual(lowercase_ , lowercase_ )
# Checks everything loads correctly in the same way
snake_case_ : Union[str, Any] = tokenizer_r.from_pretrained(lowercase_ )
snake_case_ : List[Any] = tokenizer_p.from_pretrained(lowercase_ )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(lowercase_ , lowercase_ ) )
# self.assertEqual(getattr(tokenizer_rp, key), getattr(tokenizer_pp, key))
# self.assertEqual(getattr(tokenizer_rp, key + "_id"), getattr(tokenizer_pp, key + "_id"))
shutil.rmtree(lowercase_ )
# Save tokenizer rust, legacy_format=True
snake_case_ : Optional[Any] = tempfile.mkdtemp()
snake_case_ : List[str] = tokenizer_r.save_pretrained(lowercase_ , legacy_format=lowercase_ )
snake_case_ : List[str] = tokenizer_p.save_pretrained(lowercase_ )
# Checks it save with the same files
self.assertSequenceEqual(lowercase_ , lowercase_ )
# Checks everything loads correctly in the same way
snake_case_ : List[Any] = tokenizer_r.from_pretrained(lowercase_ )
snake_case_ : List[str] = tokenizer_p.from_pretrained(lowercase_ )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(lowercase_ , lowercase_ ) )
shutil.rmtree(lowercase_ )
# Save tokenizer rust, legacy_format=False
snake_case_ : Optional[Any] = tempfile.mkdtemp()
snake_case_ : List[Any] = tokenizer_r.save_pretrained(lowercase_ , legacy_format=lowercase_ )
snake_case_ : Tuple = tokenizer_p.save_pretrained(lowercase_ )
# Checks it saved the tokenizer.json file
self.assertTrue(any('''tokenizer.json''' in f for f in tokenizer_r_files ) )
# Checks everything loads correctly in the same way
snake_case_ : Optional[Any] = tokenizer_r.from_pretrained(lowercase_ )
snake_case_ : Dict = tokenizer_p.from_pretrained(lowercase_ )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(lowercase_ , lowercase_ ) )
shutil.rmtree(lowercase_ )
@cached_property
def _snake_case ( self : List[str] ):
return XLMRobertaTokenizer.from_pretrained('''xlm-roberta-base''' )
def _snake_case ( self : Optional[Any] ):
with tempfile.NamedTemporaryFile() as f:
shutil.copyfile(lowercase_ , f.name )
snake_case_ : Any = XLMRobertaTokenizer(f.name , keep_accents=lowercase_ )
snake_case_ : List[Any] = pickle.dumps(lowercase_ )
pickle.loads(lowercase_ )
def _snake_case ( self : Tuple ):
if not self.test_rust_tokenizer:
return
snake_case_ : List[str] = self.get_tokenizer()
snake_case_ : Optional[int] = self.get_rust_tokenizer()
snake_case_ : Dict = '''I was born in 92000, and this is falsé.'''
snake_case_ : Optional[int] = tokenizer.tokenize(lowercase_ )
snake_case_ : Tuple = rust_tokenizer.tokenize(lowercase_ )
self.assertListEqual(lowercase_ , lowercase_ )
snake_case_ : List[str] = tokenizer.encode(lowercase_ , add_special_tokens=lowercase_ )
snake_case_ : str = rust_tokenizer.encode(lowercase_ , add_special_tokens=lowercase_ )
self.assertListEqual(lowercase_ , lowercase_ )
snake_case_ : int = self.get_rust_tokenizer()
snake_case_ : Any = tokenizer.encode(lowercase_ )
snake_case_ : int = rust_tokenizer.encode(lowercase_ )
self.assertListEqual(lowercase_ , lowercase_ )
@slow
def _snake_case ( self : Tuple ):
snake_case_ : int = '''Hello World!'''
snake_case_ : int = [0, 35378, 6661, 38, 2]
# xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.base') # xlmr.large has same tokenizer
# xlmr.eval()
# xlmr.encode(symbols)
self.assertListEqual(lowercase_ , self.big_tokenizer.encode(lowercase_ ) )
@slow
def _snake_case ( self : List[Any] ):
snake_case_ : Any = (
'''This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) " [ ] ! : - . Also we will'''
''' add words that should not exsist and be tokenized to <unk>, such as saoneuhaoesuth'''
)
snake_case_ : Optional[int] = [
0,
3293,
83,
10,
4552,
4989,
7986,
678,
10,
5915,
111,
179459,
124850,
4,
6044,
237,
12,
6,
5,
6,
4,
6780,
705,
15,
1388,
44,
378,
10114,
711,
152,
20,
6,
5,
22376,
642,
1221,
15190,
34153,
450,
5608,
959,
1119,
57702,
136,
186,
47,
1098,
29367,
47,
# 4426, # What fairseq tokenizes from "<unk>": "_<"
# 3678, # What fairseq tokenizes from "<unk>": "unk"
# 2740, # What fairseq tokenizes from "<unk>": ">"
3, # What we tokenize from "<unk>": "<unk>"
6, # Residue from the tokenization: an extra sentencepiece underline
4,
6044,
237,
6284,
50901,
528,
31,
90,
34,
927,
2,
]
# xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.base') # xlmr.large has same tokenizer
# xlmr.eval()
# xlmr.encode(symbols)
self.assertListEqual(lowercase_ , self.big_tokenizer.encode(lowercase_ ) )
@slow
def _snake_case ( self : Dict ):
# fmt: off
snake_case_ : int = {'''input_ids''': [[0, 11062, 82772, 7, 15, 82772, 538, 51529, 237, 17198, 1290, 206, 9, 215175, 1314, 136, 17198, 1290, 206, 9, 56359, 42, 122009, 9, 16466, 16, 87344, 4537, 9, 4717, 78381, 6, 159958, 7, 15, 24480, 618, 4, 527, 22693, 5428, 4, 2777, 24480, 9874, 4, 43523, 594, 4, 803, 18392, 33189, 18, 4, 43523, 24447, 12399, 100, 24955, 83658, 9626, 144057, 15, 839, 22335, 16, 136, 24955, 83658, 83479, 15, 39102, 724, 16, 678, 645, 2789, 1328, 4589, 42, 122009, 115774, 23, 805, 1328, 46876, 7, 136, 53894, 1940, 42227, 41159, 17721, 823, 425, 4, 27512, 98722, 206, 136, 5531, 4970, 919, 17336, 5, 2], [0, 20080, 618, 83, 82775, 47, 479, 9, 1517, 73, 53894, 333, 80581, 110117, 18811, 5256, 1295, 51, 152526, 297, 7986, 390, 124416, 538, 35431, 214, 98, 15044, 25737, 136, 7108, 43701, 23, 756, 135355, 7, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 581, 63773, 119455, 6, 147797, 88203, 7, 645, 70, 21, 3285, 10269, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]], '''attention_mask''': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=lowercase_ , model_name='''xlm-roberta-base''' , revision='''d9d8a8ea5eb94b1c6654ae9249df7793cd2933d3''' , )
| 264 | 0 |
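# A short sketch of the fairseq id offset exercised by the assertions above:
# XLM-R reserves ids for <s>, <pad>, </s>, <unk> ahead of the SentencePiece
# vocabulary, so raw piece ids are shifted by a constant before use. The
# offset of 1 mirrors the slow tokenizer; treat the values as illustrative.
fairseq_offset = 1
sp_ids = [285, 46, 10, 170, 382]  # raw SentencePiece ids for "This is a test"
token_ids = [i + fairseq_offset for i in sp_ids]
assert token_ids == [286, 47, 11, 171, 383]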
"""simple docstring"""
from __future__ import annotations
import collections
import tempfile
import unittest
import numpy as np
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import is_tf_available, is_vision_available
from ...test_modeling_tf_common import floats_tensor, ids_tensor, random_attention_mask
from ..bert.test_modeling_tf_bert import TFBertModelTester
from ..clip.test_modeling_tf_clip import TFCLIPVisionModelTester
from ..deit.test_modeling_tf_deit import TFDeiTModelTester
from ..roberta.test_modeling_tf_roberta import TFRobertaModelTester
from ..vit.test_modeling_tf_vit import TFViTModelTester
if is_tf_available():
from transformers import (
TFBertModel,
TFCLIPVisionModel,
TFDeiTModel,
TFRobertaModel,
TFVisionTextDualEncoderModel,
TFViTModel,
VisionTextDualEncoderConfig,
)
if is_vision_available():
from PIL import Image
from transformers import VisionTextDualEncoderProcessor
def _snake_case ( UpperCamelCase : List[str] ):
if isinstance(UpperCamelCase , collections.abc.Iterable ):
return x
return (x, x)
@require_tf
class SCREAMING_SNAKE_CASE__ :
def SCREAMING_SNAKE_CASE ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> Dict:
'''simple docstring'''
pass
def SCREAMING_SNAKE_CASE ( self ) -> int:
'''simple docstring'''
pass
def SCREAMING_SNAKE_CASE ( self ) -> Optional[Any]:
'''simple docstring'''
pass
def SCREAMING_SNAKE_CASE ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=None , **_SCREAMING_SNAKE_CASE ) -> List[Any]:
'''simple docstring'''
UpperCAmelCase : Union[str, Any] = VisionTextDualEncoderConfig.from_vision_text_configs(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
UpperCAmelCase : int = TFVisionTextDualEncoderModel(_SCREAMING_SNAKE_CASE )
UpperCAmelCase : Tuple = model(input_ids=_SCREAMING_SNAKE_CASE , pixel_values=_SCREAMING_SNAKE_CASE , attention_mask=_SCREAMING_SNAKE_CASE )
self.assertEqual(output["""text_embeds"""].shape , (input_ids.shape[0], config.projection_dim) )
self.assertEqual(output["""image_embeds"""].shape , (pixel_values.shape[0], config.projection_dim) )
def SCREAMING_SNAKE_CASE ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=None , **_SCREAMING_SNAKE_CASE ) -> Tuple:
'''simple docstring'''
        vision_model , text_model = self.get_vision_text_model(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
UpperCAmelCase : List[Any] = TFVisionTextDualEncoderModel(vision_model=_SCREAMING_SNAKE_CASE , text_model=_SCREAMING_SNAKE_CASE )
UpperCAmelCase : List[str] = model(input_ids=_SCREAMING_SNAKE_CASE , pixel_values=_SCREAMING_SNAKE_CASE , attention_mask=_SCREAMING_SNAKE_CASE )
self.assertEqual(output["""text_embeds"""].shape , (input_ids.shape[0], model.config.projection_dim) )
self.assertEqual(output["""image_embeds"""].shape , (pixel_values.shape[0], model.config.projection_dim) )
def SCREAMING_SNAKE_CASE ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=None , **_SCREAMING_SNAKE_CASE ) -> int:
'''simple docstring'''
        vision_model , text_model = self.get_vision_text_model(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
UpperCAmelCase : List[str] = {"""vision_model""": vision_model, """text_model""": text_model}
UpperCAmelCase : List[Any] = TFVisionTextDualEncoderModel.from_vision_text_pretrained(**_SCREAMING_SNAKE_CASE )
UpperCAmelCase : List[str] = model(input_ids=_SCREAMING_SNAKE_CASE , pixel_values=_SCREAMING_SNAKE_CASE , attention_mask=_SCREAMING_SNAKE_CASE )
self.assertEqual(output["""text_embeds"""].shape , (input_ids.shape[0], model.config.projection_dim) )
self.assertEqual(output["""image_embeds"""].shape , (pixel_values.shape[0], model.config.projection_dim) )
def SCREAMING_SNAKE_CASE ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=None , **_SCREAMING_SNAKE_CASE ) -> List[str]:
'''simple docstring'''
        vision_model , text_model = self.get_vision_text_model(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
UpperCAmelCase : Any = TFVisionTextDualEncoderModel(vision_model=_SCREAMING_SNAKE_CASE , text_model=_SCREAMING_SNAKE_CASE )
UpperCAmelCase : Union[str, Any] = model(input_ids=_SCREAMING_SNAKE_CASE , pixel_values=_SCREAMING_SNAKE_CASE , attention_mask=_SCREAMING_SNAKE_CASE )
UpperCAmelCase : Optional[Any] = output[0].numpy()
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(_SCREAMING_SNAKE_CASE )
UpperCAmelCase : List[Any] = TFVisionTextDualEncoderModel.from_pretrained(_SCREAMING_SNAKE_CASE )
UpperCAmelCase : Optional[int] = model(input_ids=_SCREAMING_SNAKE_CASE , pixel_values=_SCREAMING_SNAKE_CASE , attention_mask=_SCREAMING_SNAKE_CASE )
UpperCAmelCase : Any = after_output[0].numpy()
UpperCAmelCase : int = np.amax(np.abs(out_a - out_a ) )
self.assertLessEqual(_SCREAMING_SNAKE_CASE , 1E-5 )
def SCREAMING_SNAKE_CASE ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=None , **_SCREAMING_SNAKE_CASE ) -> List[Any]:
'''simple docstring'''
        vision_model , text_model = self.get_vision_text_model(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
UpperCAmelCase : int = TFVisionTextDualEncoderModel(vision_model=_SCREAMING_SNAKE_CASE , text_model=_SCREAMING_SNAKE_CASE )
UpperCAmelCase : Dict = model(
input_ids=_SCREAMING_SNAKE_CASE , pixel_values=_SCREAMING_SNAKE_CASE , attention_mask=_SCREAMING_SNAKE_CASE , output_attentions=_SCREAMING_SNAKE_CASE )
UpperCAmelCase : Any = output.vision_model_output.attentions
self.assertEqual(len(_SCREAMING_SNAKE_CASE ) , vision_config.num_hidden_layers )
# in ViT, the seq_len equals the number of patches + 1 (we add 1 for the [CLS] token)
UpperCAmelCase : Tuple = to_atuple(vision_model.config.image_size )
UpperCAmelCase : Optional[int] = to_atuple(vision_model.config.patch_size )
UpperCAmelCase : List[Any] = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
UpperCAmelCase : Optional[Any] = num_patches + 1
self.assertEqual(vision_attentions[0].shape[-3:] , (vision_config.num_attention_heads, seq_len, seq_len) )
UpperCAmelCase : List[Any] = output.text_model_output.attentions
self.assertEqual(len(_SCREAMING_SNAKE_CASE ) , text_config.num_hidden_layers )
self.assertEqual(
text_attentions[0].shape[-3:] , (text_config.num_attention_heads, input_ids.shape[-1], input_ids.shape[-1]) , )
def SCREAMING_SNAKE_CASE ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> List[Any]:
'''simple docstring'''
UpperCAmelCase : str = np.abs((a - b) ).max()
self.assertLessEqual(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , F"Difference between torch and flax is {diff} (>= {tol})." )
def SCREAMING_SNAKE_CASE ( self ) -> List[Any]:
'''simple docstring'''
UpperCAmelCase : Optional[int] = self.prepare_config_and_inputs()
self.check_vision_text_dual_encoder_model(**_SCREAMING_SNAKE_CASE )
def SCREAMING_SNAKE_CASE ( self ) -> Any:
'''simple docstring'''
UpperCAmelCase : List[Any] = self.prepare_config_and_inputs()
self.check_model_from_pretrained_configs(**_SCREAMING_SNAKE_CASE )
def SCREAMING_SNAKE_CASE ( self ) -> Any:
'''simple docstring'''
UpperCAmelCase : Optional[int] = self.prepare_config_and_inputs()
self.check_vision_text_dual_encoder_from_pretrained(**_SCREAMING_SNAKE_CASE )
def SCREAMING_SNAKE_CASE ( self ) -> List[str]:
'''simple docstring'''
UpperCAmelCase : List[str] = self.prepare_config_and_inputs()
self.check_save_load(**_SCREAMING_SNAKE_CASE )
def SCREAMING_SNAKE_CASE ( self ) -> Union[str, Any]:
'''simple docstring'''
UpperCAmelCase : Optional[int] = self.prepare_config_and_inputs()
self.check_vision_text_output_attention(**_SCREAMING_SNAKE_CASE )
@slow
def SCREAMING_SNAKE_CASE ( self ) -> Tuple:
'''simple docstring'''
        model_a , inputs = self.get_pretrained_model_and_inputs()
UpperCAmelCase : List[Any] = model_a(**_SCREAMING_SNAKE_CASE )
UpperCAmelCase : List[Any] = outputs[0].numpy()
with tempfile.TemporaryDirectory() as tmp_dirname:
model_a.save_pretrained(_SCREAMING_SNAKE_CASE )
UpperCAmelCase : Tuple = TFVisionTextDualEncoderModel.from_pretrained(_SCREAMING_SNAKE_CASE )
UpperCAmelCase : List[str] = model_a(**_SCREAMING_SNAKE_CASE )
UpperCAmelCase : Optional[Any] = after_outputs[0].numpy()
UpperCAmelCase : List[Any] = np.amax(np.abs(out_a - out_a ) )
self.assertLessEqual(_SCREAMING_SNAKE_CASE , 1E-5 )
@require_tf
class SCREAMING_SNAKE_CASE__ ( UpperCAmelCase__ , unittest.TestCase ):
def SCREAMING_SNAKE_CASE ( self ) -> Dict:
'''simple docstring'''
UpperCAmelCase : Tuple = TFVisionTextDualEncoderModel.from_vision_text_pretrained(
"""hf-internal-testing/tiny-random-vit""" , """hf-internal-testing/tiny-random-bert""" )
UpperCAmelCase : int = 13
UpperCAmelCase : Optional[int] = floats_tensor(
[
batch_size,
model.vision_model.config.num_channels,
model.vision_model.config.image_size,
model.vision_model.config.image_size,
] )
UpperCAmelCase : Any = ids_tensor([batch_size, 4] , model.text_model.config.vocab_size )
UpperCAmelCase : int = random_attention_mask([batch_size, 4] )
UpperCAmelCase : List[str] = {"""pixel_values""": pixel_values, """input_ids""": input_ids, """attention_mask""": attention_mask}
return model, inputs
def SCREAMING_SNAKE_CASE ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> List[Any]:
'''simple docstring'''
UpperCAmelCase : Optional[Any] = TFViTModel(_SCREAMING_SNAKE_CASE , name="""vision_model""" )
UpperCAmelCase : Optional[Any] = TFBertModel(_SCREAMING_SNAKE_CASE , name="""text_model""" )
return vision_model, text_model
def SCREAMING_SNAKE_CASE ( self ) -> Tuple:
'''simple docstring'''
        vit_model_tester = TFViTModelTester(self )
        bert_model_tester = TFBertModelTester(self )
        vision_config_and_inputs = vit_model_tester.prepare_config_and_inputs()
        text_config_and_inputs = bert_model_tester.prepare_config_and_inputs()
        vision_config , pixel_values , _ = vision_config_and_inputs
        (
            text_config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = text_config_and_inputs
return {
"text_config": text_config,
"vision_config": vision_config,
"pixel_values": pixel_values,
"attention_mask": input_mask,
"input_ids": input_ids,
"text_token_type_ids": token_type_ids,
"text_sequence_labels": sequence_labels,
"text_token_labels": token_labels,
"text_choice_labels": choice_labels,
}
@require_tf
class SCREAMING_SNAKE_CASE__ ( UpperCAmelCase__ , unittest.TestCase ):
def SCREAMING_SNAKE_CASE ( self ) -> int:
'''simple docstring'''
UpperCAmelCase : Tuple = TFVisionTextDualEncoderModel.from_vision_text_pretrained(
"""Rocketknight1/tiny-random-deit-tf""" , """hf-internal-testing/tiny-random-roberta""" )
UpperCAmelCase : Union[str, Any] = 13
UpperCAmelCase : Optional[Any] = floats_tensor(
[
batch_size,
model.vision_model.config.num_channels,
model.vision_model.config.image_size,
model.vision_model.config.image_size,
] )
UpperCAmelCase : List[Any] = ids_tensor([batch_size, 4] , model.text_model.config.vocab_size )
UpperCAmelCase : Optional[Any] = random_attention_mask([batch_size, 4] )
UpperCAmelCase : Tuple = {"""pixel_values""": pixel_values, """input_ids""": input_ids, """attention_mask""": attention_mask}
return model, inputs
def SCREAMING_SNAKE_CASE ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=None , **_SCREAMING_SNAKE_CASE ) -> List[str]:
'''simple docstring'''
        vision_model , text_model = self.get_vision_text_model(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
UpperCAmelCase : Optional[int] = TFVisionTextDualEncoderModel(vision_model=_SCREAMING_SNAKE_CASE , text_model=_SCREAMING_SNAKE_CASE )
UpperCAmelCase : List[Any] = model(
input_ids=_SCREAMING_SNAKE_CASE , pixel_values=_SCREAMING_SNAKE_CASE , attention_mask=_SCREAMING_SNAKE_CASE , output_attentions=_SCREAMING_SNAKE_CASE )
UpperCAmelCase : Tuple = output.vision_model_output.attentions
self.assertEqual(len(_SCREAMING_SNAKE_CASE ) , vision_config.num_hidden_layers )
# in DEiT, the seq_len equals the number of patches + 2 (we add 2 for the [CLS] and distillation tokens)
UpperCAmelCase : Dict = to_atuple(vision_model.config.image_size )
UpperCAmelCase : Tuple = to_atuple(vision_model.config.patch_size )
UpperCAmelCase : Union[str, Any] = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
UpperCAmelCase : List[str] = num_patches + 2
self.assertEqual(vision_attentions[0].shape[-3:] , (vision_config.num_attention_heads, seq_len, seq_len) )
UpperCAmelCase : List[str] = output.text_model_output.attentions
self.assertEqual(len(_SCREAMING_SNAKE_CASE ) , text_config.num_hidden_layers )
self.assertEqual(
text_attentions[0].shape[-3:] , (text_config.num_attention_heads, input_ids.shape[-1], input_ids.shape[-1]) , )
def SCREAMING_SNAKE_CASE ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> List[str]:
'''simple docstring'''
UpperCAmelCase : Optional[Any] = TFDeiTModel(_SCREAMING_SNAKE_CASE , name="""vision_model""" )
UpperCAmelCase : Union[str, Any] = TFRobertaModel(_SCREAMING_SNAKE_CASE , name="""text_model""" )
return vision_model, text_model
def SCREAMING_SNAKE_CASE ( self ) -> Optional[int]:
'''simple docstring'''
        vit_model_tester = TFDeiTModelTester(self )
        bert_model_tester = TFRobertaModelTester(self )
        vision_config_and_inputs = vit_model_tester.prepare_config_and_inputs()
        text_config_and_inputs = bert_model_tester.prepare_config_and_inputs()
        vision_config , pixel_values , _ = vision_config_and_inputs
        (
            text_config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = text_config_and_inputs
return {
"text_config": text_config,
"vision_config": vision_config,
"pixel_values": pixel_values,
"attention_mask": input_mask,
"input_ids": input_ids,
"text_token_type_ids": token_type_ids,
"text_sequence_labels": sequence_labels,
"text_token_labels": token_labels,
"text_choice_labels": choice_labels,
}
@require_tf
class SCREAMING_SNAKE_CASE__ ( UpperCAmelCase__ , unittest.TestCase ):
def SCREAMING_SNAKE_CASE ( self ) -> Tuple:
'''simple docstring'''
UpperCAmelCase : List[Any] = TFVisionTextDualEncoderModel.from_vision_text_pretrained(
"""Rocketknight1/tiny-random-clip-tf""" , """hf-internal-testing/tiny-random-bert""" )
UpperCAmelCase : Optional[int] = 13
UpperCAmelCase : Union[str, Any] = floats_tensor(
[
batch_size,
model.vision_model.config.num_channels,
model.vision_model.config.image_size,
model.vision_model.config.image_size,
] )
UpperCAmelCase : Optional[Any] = ids_tensor([batch_size, 4] , model.text_model.config.vocab_size )
UpperCAmelCase : Union[str, Any] = random_attention_mask([batch_size, 4] )
UpperCAmelCase : Optional[int] = {"""pixel_values""": pixel_values, """input_ids""": input_ids, """attention_mask""": attention_mask}
return model, inputs
def SCREAMING_SNAKE_CASE ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> List[Any]:
'''simple docstring'''
UpperCAmelCase : str = TFCLIPVisionModel(_SCREAMING_SNAKE_CASE , name="""vision_model""" )
UpperCAmelCase : List[str] = TFBertModel(_SCREAMING_SNAKE_CASE , name="""text_model""" )
return vision_model, text_model
def SCREAMING_SNAKE_CASE ( self ) -> int:
'''simple docstring'''
        clip_model_tester = TFCLIPVisionModelTester(self )
        bert_model_tester = TFBertModelTester(self )
        vision_config_and_inputs = clip_model_tester.prepare_config_and_inputs()
        text_config_and_inputs = bert_model_tester.prepare_config_and_inputs()
        vision_config , pixel_values = vision_config_and_inputs
        (
            text_config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = text_config_and_inputs
return {
"text_config": text_config,
"vision_config": vision_config,
"pixel_values": pixel_values,
"attention_mask": input_mask,
"input_ids": input_ids,
"text_token_type_ids": token_type_ids,
"text_sequence_labels": sequence_labels,
"text_token_labels": token_labels,
"text_choice_labels": choice_labels,
}
@require_vision
@require_tf
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
@slow
def SCREAMING_SNAKE_CASE ( self ) -> Any:
'''simple docstring'''
UpperCAmelCase : List[Any] = TFVisionTextDualEncoderModel.from_pretrained(
"""clip-italian/clip-italian""" , logit_scale_init_value=1.0 , from_pt=_SCREAMING_SNAKE_CASE )
UpperCAmelCase : List[Any] = VisionTextDualEncoderProcessor.from_pretrained("""clip-italian/clip-italian""" )
UpperCAmelCase : Optional[Any] = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
UpperCAmelCase : Optional[int] = processor(
text=["""una foto di un gatto""", """una foto di un cane"""] , images=_SCREAMING_SNAKE_CASE , padding=_SCREAMING_SNAKE_CASE , return_tensors="""np""" )
UpperCAmelCase : Optional[int] = model(**_SCREAMING_SNAKE_CASE )
# verify the logits
self.assertEqual(outputs.logits_per_image.shape , (inputs.pixel_values.shape[0], inputs.input_ids.shape[0]) )
self.assertEqual(
outputs.logits_per_text.shape , (inputs.input_ids.shape[0], inputs.pixel_values.shape[0]) , )
UpperCAmelCase : str = np.array([[1.228_4727, 0.310_4122]] )
self.assertTrue(np.allclose(outputs.logits_per_image.numpy() , _SCREAMING_SNAKE_CASE , atol=1E-3 ) )
| 109 |
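# A minimal numpy sketch of how a dual encoder turns the text and image
# embeddings tested above into logits: L2-normalize both sides, then scale the
# cosine similarities. Shapes and the logit scale are illustrative only.
import numpy as np

def clip_style_logits(image_embeds, text_embeds, logit_scale=1.0):
    image_embeds = image_embeds / np.linalg.norm(image_embeds, axis=-1, keepdims=True)
    text_embeds = text_embeds / np.linalg.norm(text_embeds, axis=-1, keepdims=True)
    logits_per_text = logit_scale * text_embeds @ image_embeds.T
    return logits_per_text.T, logits_per_text  # logits_per_image, logits_per_text

imgs, txts = np.random.rand(3, 8), np.random.rand(2, 8)
per_image, per_text = clip_style_logits(imgs, txts)
assert per_image.shape == (3, 2) and per_text.shape == (2, 3)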
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowercase__ : int = logging.get_logger(__name__)
lowercase__ : List[Any] = {
'''EleutherAI/gpt-neox-20b''': '''https://huggingface.co/EleutherAI/gpt-neox-20b/resolve/main/config.json''',
# See all GPTNeoX models at https://huggingface.co/models?filter=gpt_neox
}
class _UpperCAmelCase ( lowerCAmelCase__):
_lowerCAmelCase : List[Any] = """gpt_neox"""
def __init__( self : List[str] , lowercase_ : str=50432 , lowercase_ : List[Any]=6144 , lowercase_ : List[Any]=44 , lowercase_ : Union[str, Any]=64 , lowercase_ : List[str]=24576 , lowercase_ : List[Any]="gelu" , lowercase_ : str=0.25 , lowercase_ : Optional[int]=10000 , lowercase_ : Optional[int]=0.0 , lowercase_ : Optional[int]=0.0 , lowercase_ : int=0.1 , lowercase_ : Tuple=2048 , lowercase_ : Union[str, Any]=0.02 , lowercase_ : List[str]=1E-5 , lowercase_ : str=True , lowercase_ : str=0 , lowercase_ : Union[str, Any]=2 , lowercase_ : List[str]=False , lowercase_ : Optional[int]=True , lowercase_ : List[Any]=None , **lowercase_ : Optional[int] , ):
super().__init__(bos_token_id=lowercase_ , eos_token_id=lowercase_ , **lowercase_ )
snake_case_ : List[str] = vocab_size
snake_case_ : Optional[Any] = max_position_embeddings
snake_case_ : str = hidden_size
snake_case_ : Dict = num_hidden_layers
snake_case_ : Dict = num_attention_heads
snake_case_ : List[Any] = intermediate_size
snake_case_ : List[Any] = hidden_act
snake_case_ : str = rotary_pct
snake_case_ : Dict = rotary_emb_base
snake_case_ : Optional[int] = attention_dropout
snake_case_ : Tuple = hidden_dropout
snake_case_ : Tuple = classifier_dropout
snake_case_ : List[str] = initializer_range
snake_case_ : Union[str, Any] = layer_norm_eps
snake_case_ : Any = use_cache
snake_case_ : Optional[int] = tie_word_embeddings
snake_case_ : Any = use_parallel_residual
snake_case_ : Union[str, Any] = rope_scaling
self._rope_scaling_validation()
if self.hidden_size % self.num_attention_heads != 0:
raise ValueError(
                '''The hidden size is not divisible by the number of attention heads! Make sure to update them!''' )
def _snake_case ( self : Optional[int] ):
if self.rope_scaling is None:
return
if not isinstance(self.rope_scaling , lowercase_ ) or len(self.rope_scaling ) != 2:
            raise ValueError(
                '''`rope_scaling` must be a dictionary with two fields, `type` and `factor`, '''
                f"got {self.rope_scaling}" )
snake_case_ : Any = self.rope_scaling.get('''type''' , lowercase_ )
snake_case_ : Union[str, Any] = self.rope_scaling.get('''factor''' , lowercase_ )
if rope_scaling_type is None or rope_scaling_type not in ["linear", "dynamic"]:
raise ValueError(
f"`rope_scaling`'s name field must be one of ['linear', 'dynamic'], got {rope_scaling_type}" )
if rope_scaling_factor is None or not isinstance(lowercase_ , lowercase_ ) or rope_scaling_factor <= 1.0:
raise ValueError(f"`rope_scaling`'s factor field must be an float > 1, got {rope_scaling_factor}" )
| 264 | 0 |
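# A hedged sketch of what the validated `rope_scaling` dict controls: with
# "linear" scaling the rotary position index is divided by `factor`, stretching
# the usable context window. This mirrors the {"type": ..., "factor": ...}
# schema checked above; the function itself is illustrative, not the library's.
def scaled_position(position_id, rope_scaling=None):
    if rope_scaling is None:
        return float(position_id)
    if rope_scaling["type"] == "linear":
        return position_id / rope_scaling["factor"]
    raise NotImplementedError("dynamic scaling rescales the rotary base instead")

assert scaled_position(4096, {"type": "linear", "factor": 2.0}) == 2048.0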
"""simple docstring"""
import argparse
import re
import torch
from CLAP import create_model
from transformers import AutoFeatureExtractor, ClapConfig, ClapModel
KEYS_TO_MODIFY_MAPPING = {
'''text_branch''': '''text_model''',
'''audio_branch''': '''audio_model.audio_encoder''',
'''attn''': '''attention.self''',
'''self.proj''': '''output.dense''',
'''attention.self_mask''': '''attn_mask''',
'''mlp.fc1''': '''intermediate.dense''',
'''mlp.fc2''': '''output.dense''',
'''norm1''': '''layernorm_before''',
'''norm2''': '''layernorm_after''',
'''bn0''': '''batch_norm''',
}
A: Optional[int] = AutoFeatureExtractor.from_pretrained("laion/clap-htsat-unfused", truncation="rand_trunc")
def _snake_case ( UpperCamelCase : int , UpperCamelCase : int=False ):
UpperCAmelCase : int = create_model(
"""HTSAT-tiny""" , """roberta""" , _a , precision="""fp32""" , device="""cuda:0""" if torch.cuda.is_available() else """cpu""" , enable_fusion=_a , fusion_type="""aff_2d""" if enable_fusion else None , )
return model, model_cfg
def rename_state_dict( state_dict ):
    model_state_dict = {}
    sequential_layers_pattern = r'''.*sequential.(\d+).*'''
    text_projection_pattern = r'''.*_projection.(\d+).*'''
    for key, value in state_dict.items():
        # check if any key needs to be modified
        for key_to_modify, new_key in KEYS_TO_MODIFY_MAPPING.items():
            if key_to_modify in key:
                key = key.replace(key_to_modify , new_key )
        if re.match(sequential_layers_pattern , key ):
            # replace sequential layers with list
            sequential_layer = re.match(sequential_layers_pattern , key ).group(1 )
            key = key.replace(F"sequential.{sequential_layer}." , F"layers.{int(sequential_layer )//3}.linear." )
        elif re.match(text_projection_pattern , key ):
            projecton_layer = int(re.match(text_projection_pattern , key ).group(1 ) )
            # Because in CLAP they use `nn.Sequential`...
            transformers_projection_layer = 1 if projecton_layer == 0 else 2
            key = key.replace(F"_projection.{projecton_layer}." , F"_projection.linear{transformers_projection_layer}." )
        if "audio" and "qkv" in key:
            # split qkv into query, key and value
            mixed_qkv = value
            qkv_dim = mixed_qkv.size(0 ) // 3
            query_layer = mixed_qkv[:qkv_dim]
            key_layer = mixed_qkv[qkv_dim : qkv_dim * 2]
            value_layer = mixed_qkv[qkv_dim * 2 :]
            model_state_dict[key.replace("qkv" , "query" )] = query_layer
            model_state_dict[key.replace("qkv" , "key" )] = key_layer
            model_state_dict[key.replace("qkv" , "value" )] = value_layer
        else:
            model_state_dict[key] = value
    return model_state_dict
def convert_clap_checkpoint( checkpoint_path , pytorch_dump_folder_path , config_path , enable_fusion=False ):
    clap_model , clap_model_cfg = init_clap(checkpoint_path , enable_fusion=enable_fusion )
    clap_model.eval()
    state_dict = clap_model.state_dict()
    state_dict = rename_state_dict(state_dict )
    transformers_config = ClapConfig()
    transformers_config.audio_config.enable_fusion = enable_fusion
    model = ClapModel(transformers_config )
    # ignore the spectrogram embedding layer
    model.load_state_dict(state_dict , strict=False )
    model.save_pretrained(pytorch_dump_folder_path )
    transformers_config.save_pretrained(pytorch_dump_folder_path )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to fairseq checkpoint")
parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
parser.add_argument("--enable_fusion", action="store_true", help="Whether to enable fusion or not")
    args = parser.parse_args()
convert_clap_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.enable_fusion)
| 109 |
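# A self-contained sketch of the fused-qkv split performed by the conversion
# script above: CLAP stores query/key/value as one stacked tensor along dim 0,
# while the HF model expects three separate tensors of equal size.
import torch

def split_fused_qkv(mixed_qkv: torch.Tensor):
    qkv_dim = mixed_qkv.size(0) // 3
    query = mixed_qkv[:qkv_dim]
    key = mixed_qkv[qkv_dim : qkv_dim * 2]
    value = mixed_qkv[qkv_dim * 2 :]
    return query, key, value

q, k, v = split_fused_qkv(torch.zeros(3 * 64, 64))  # e.g. a 64-dim projection
assert q.shape == k.shape == v.shape == (64, 64)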
"""simple docstring"""
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_pegasus import PegasusTokenizer
else:
lowercase__ : int = None
lowercase__ : Any = logging.get_logger(__name__)
lowercase__ : List[str] = '''▁'''
lowercase__ : Optional[int] = {'''vocab_file''': '''spiece.model''', '''tokenizer_file''': '''tokenizer.json'''}
lowercase__ : str = {
'''vocab_file''': {'''google/pegasus-xsum''': '''https://huggingface.co/google/pegasus-xsum/resolve/main/spiece.model'''},
'''tokenizer_file''': {
'''google/pegasus-xsum''': '''https://huggingface.co/google/pegasus-xsum/resolve/main/tokenizer.json'''
},
}
lowercase__ : List[Any] = {
'''google/pegasus-xsum''': 5_12,
}
class _UpperCAmelCase ( lowerCAmelCase__):
_lowerCAmelCase : List[str] = VOCAB_FILES_NAMES
_lowerCAmelCase : List[str] = PRETRAINED_VOCAB_FILES_MAP
_lowerCAmelCase : Any = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_lowerCAmelCase : Tuple = PegasusTokenizer
_lowerCAmelCase : str = ["""input_ids""", """attention_mask"""]
def __init__( self : Any , lowercase_ : Optional[Any]=None , lowercase_ : int=None , lowercase_ : Tuple="<pad>" , lowercase_ : int="</s>" , lowercase_ : Tuple="<unk>" , lowercase_ : str="<mask_2>" , lowercase_ : Optional[Any]="<mask_1>" , lowercase_ : str=None , lowercase_ : List[str]=103 , **lowercase_ : List[Any] , ):
snake_case_ : Dict = offset
if additional_special_tokens is not None:
if not isinstance(lowercase_ , lowercase_ ):
raise TypeError(
f"additional_special_tokens should be of type {type(lowercase_ )}, but is"
f" {type(lowercase_ )}" )
snake_case_ : str = (
([mask_token_sent] + additional_special_tokens)
if mask_token_sent not in additional_special_tokens and mask_token_sent is not None
else additional_special_tokens
)
# fill additional tokens with ..., <unk_token_102> in case not all additional tokens are already taken
additional_special_tokens_extended += [
f"<unk_{i}>" for i in range(len(lowercase_ ) , self.offset - 1 )
]
if len(set(lowercase_ ) ) != len(lowercase_ ):
raise ValueError(
'''Please make sure that the provided additional_special_tokens do not contain an incorrectly'''
f" shifted list of <unk_x> tokens. Found {additional_special_tokens_extended}." )
snake_case_ : Union[str, Any] = additional_special_tokens_extended
else:
snake_case_ : Dict = [mask_token_sent] if mask_token_sent is not None else []
additional_special_tokens += [f"<unk_{i}>" for i in range(2 , self.offset )]
super().__init__(
lowercase_ , tokenizer_file=lowercase_ , pad_token=lowercase_ , eos_token=lowercase_ , unk_token=lowercase_ , mask_token=lowercase_ , mask_token_sent=lowercase_ , offset=lowercase_ , additional_special_tokens=lowercase_ , **lowercase_ , )
snake_case_ : List[Any] = vocab_file
snake_case_ : List[Any] = False if not self.vocab_file else True
def _snake_case ( self : str , lowercase_ : Union[str, Any] ):
snake_case_ : Any = set(self.all_special_ids ) # call it once instead of inside list comp
all_special_ids.remove(self.unk_token_id ) # <unk> is only sometimes special
if all_special_ids != set(range(len(self.additional_special_tokens ) + 3 ) ):
raise ValueError(
'''There should be 3 special tokens: mask_token, pad_token, and eos_token +'''
f" {len(self.additional_special_tokens )} additional_special_tokens, but got {all_special_ids}" )
return [1 if x in all_special_ids else 0 for x in seq]
def _snake_case ( self : int , lowercase_ : List , lowercase_ : Optional[List] = None , lowercase_ : bool = False ):
if already_has_special_tokens:
return self._special_token_mask(lowercase_ )
elif token_ids_a is None:
return self._special_token_mask(lowercase_ ) + [1]
else:
return self._special_token_mask(token_ids_a + token_ids_a ) + [1]
def _snake_case ( self : List[Any] , lowercase_ : Optional[int] , lowercase_ : str=None ):
if token_ids_a is None:
return token_ids_a + [self.eos_token_id]
# We don't expect to process pairs, but leave the pair logic for API consistency
return token_ids_a + token_ids_a + [self.eos_token_id]
def _snake_case ( self : Optional[Any] , lowercase_ : str , lowercase_ : Optional[str] = None ):
if not self.can_save_slow_tokenizer:
raise ValueError(
'''Your fast tokenizer does not have the necessary information to save the vocabulary for a slow '''
'''tokenizer.''' )
if not os.path.isdir(lowercase_ ):
logger.error(f"Vocabulary path ({save_directory}) should be a directory" )
return
snake_case_ : Dict = os.path.join(
lowercase_ , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(lowercase_ ):
copyfile(self.vocab_file , lowercase_ )
return (out_vocab_file,)
| 264 | 0 |
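# A brief sketch of the <unk_i> filler logic shown above: Pegasus reserves
# `offset` special ids (103 by default) and pads whatever the user did not name
# with placeholder tokens so downstream vocabulary ids stay aligned. Values are
# illustrative of the default configuration.
offset = 103
mask_token_sent = "<mask_1>"
additional_special_tokens = [mask_token_sent] + [f"<unk_{i}>" for i in range(2, offset)]
assert len(additional_special_tokens) == offset - 1  # <mask_1> plus <unk_2>..<unk_102>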
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
lowerCAmelCase__ : Dict = {'''configuration_vit_msn''': ['''VIT_MSN_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''ViTMSNConfig''']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase__ : Any = [
'''VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''ViTMSNModel''',
'''ViTMSNForImageClassification''',
'''ViTMSNPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_vit_msn import VIT_MSN_PRETRAINED_CONFIG_ARCHIVE_MAP, ViTMSNConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vit_msn import (
VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST,
ViTMSNForImageClassification,
ViTMSNModel,
ViTMSNPreTrainedModel,
)
else:
import sys
lowerCAmelCase__ : List[Any] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 98 |
"""simple docstring"""
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_LIST,
OpenAIGPTConfig,
OpenAIGPTDoubleHeadsModel,
OpenAIGPTForSequenceClassification,
OpenAIGPTLMHeadModel,
OpenAIGPTModel,
)
class _UpperCAmelCase :
def __init__( self : Union[str, Any] , lowercase_ : List[Any] , lowercase_ : int=13 , lowercase_ : Optional[int]=7 , lowercase_ : Any=True , lowercase_ : Dict=True , lowercase_ : Dict=True , lowercase_ : Optional[Any]=99 , lowercase_ : Union[str, Any]=32 , lowercase_ : str=5 , lowercase_ : Union[str, Any]=4 , lowercase_ : Any=37 , lowercase_ : Tuple="gelu" , lowercase_ : Dict=0.1 , lowercase_ : Tuple=0.1 , lowercase_ : Optional[int]=512 , lowercase_ : Optional[Any]=16 , lowercase_ : Optional[Any]=2 , lowercase_ : Optional[Any]=0.02 , lowercase_ : List[Any]=3 , lowercase_ : Union[str, Any]=4 , lowercase_ : List[Any]=None , ):
snake_case_ : Any = parent
snake_case_ : List[str] = batch_size
snake_case_ : List[Any] = seq_length
snake_case_ : Optional[int] = is_training
snake_case_ : Union[str, Any] = use_token_type_ids
snake_case_ : Optional[Any] = use_labels
snake_case_ : Union[str, Any] = vocab_size
snake_case_ : Any = hidden_size
snake_case_ : List[Any] = num_hidden_layers
snake_case_ : Any = num_attention_heads
snake_case_ : Dict = intermediate_size
snake_case_ : Union[str, Any] = hidden_act
snake_case_ : Optional[int] = hidden_dropout_prob
snake_case_ : Optional[Any] = attention_probs_dropout_prob
snake_case_ : Tuple = max_position_embeddings
snake_case_ : int = type_vocab_size
snake_case_ : Tuple = type_sequence_label_size
snake_case_ : str = initializer_range
snake_case_ : Tuple = num_labels
snake_case_ : str = num_choices
snake_case_ : Any = scope
snake_case_ : Dict = self.vocab_size - 1
def _snake_case ( self : int ):
snake_case_ : Optional[Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
snake_case_ : Optional[Any] = None
if self.use_token_type_ids:
snake_case_ : Tuple = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
snake_case_ : str = None
snake_case_ : Dict = None
snake_case_ : str = None
if self.use_labels:
snake_case_ : Optional[int] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
snake_case_ : List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
snake_case_ : Tuple = ids_tensor([self.batch_size] , self.num_choices )
snake_case_ : int = OpenAIGPTConfig(
vocab_size=self.vocab_size , n_embd=self.hidden_size , n_layer=self.num_hidden_layers , n_head=self.num_attention_heads , n_positions=self.max_position_embeddings , pad_token_id=self.pad_token_id , )
snake_case_ : Any = ids_tensor([self.num_hidden_layers, self.num_attention_heads] , 2 )
return (
config,
input_ids,
head_mask,
token_type_ids,
sequence_labels,
token_labels,
choice_labels,
)
def _snake_case ( self : Tuple , lowercase_ : Any , lowercase_ : Union[str, Any] , lowercase_ : str , lowercase_ : Dict , *lowercase_ : Dict ):
snake_case_ : List[Any] = OpenAIGPTModel(config=lowercase_ )
model.to(lowercase_ )
model.eval()
snake_case_ : Any = model(lowercase_ , token_type_ids=lowercase_ , head_mask=lowercase_ )
snake_case_ : Optional[Any] = model(lowercase_ , token_type_ids=lowercase_ )
snake_case_ : Optional[Any] = model(lowercase_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def _snake_case ( self : Tuple , lowercase_ : Dict , lowercase_ : str , lowercase_ : Optional[Any] , lowercase_ : List[Any] , *lowercase_ : Optional[Any] ):
snake_case_ : Union[str, Any] = OpenAIGPTLMHeadModel(lowercase_ )
model.to(lowercase_ )
model.eval()
snake_case_ : Union[str, Any] = model(lowercase_ , token_type_ids=lowercase_ , labels=lowercase_ )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def _snake_case ( self : List[str] , lowercase_ : Dict , lowercase_ : List[str] , lowercase_ : Any , lowercase_ : Dict , *lowercase_ : Union[str, Any] ):
snake_case_ : Tuple = OpenAIGPTDoubleHeadsModel(lowercase_ )
model.to(lowercase_ )
model.eval()
snake_case_ : Dict = model(lowercase_ , token_type_ids=lowercase_ , labels=lowercase_ )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def _snake_case ( self : Any , lowercase_ : str , lowercase_ : List[str] , lowercase_ : Optional[Any] , lowercase_ : Optional[Any] , *lowercase_ : Any ):
snake_case_ : int = self.num_labels
snake_case_ : Any = OpenAIGPTForSequenceClassification(lowercase_ )
model.to(lowercase_ )
model.eval()
snake_case_ : Optional[int] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
snake_case_ : Optional[Any] = model(lowercase_ , token_type_ids=lowercase_ , labels=lowercase_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def _snake_case ( self : int ):
snake_case_ : Dict = self.prepare_config_and_inputs()
(
(
snake_case_
), (
snake_case_
), (
snake_case_
), (
snake_case_
), (
snake_case_
), (
snake_case_
), (
snake_case_
),
) : str = config_and_inputs
snake_case_ : str = {
'''input_ids''': input_ids,
'''token_type_ids''': token_type_ids,
'''head_mask''': head_mask,
}
return config, inputs_dict
@require_torch
class _UpperCAmelCase ( lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , unittest.TestCase):
_lowerCAmelCase : Dict = (
(OpenAIGPTModel, OpenAIGPTLMHeadModel, OpenAIGPTDoubleHeadsModel, OpenAIGPTForSequenceClassification)
if is_torch_available()
else ()
)
_lowerCAmelCase : int = (
(OpenAIGPTLMHeadModel,) if is_torch_available() else ()
) # TODO (PVP): Add Double HeadsModel when generate() function is changed accordingly
_lowerCAmelCase : Union[str, Any] = (
{
"""feature-extraction""": OpenAIGPTModel,
"""text-classification""": OpenAIGPTForSequenceClassification,
"""text-generation""": OpenAIGPTLMHeadModel,
"""zero-shot""": OpenAIGPTForSequenceClassification,
}
if is_torch_available()
else {}
)
def _snake_case ( self : Tuple , lowercase_ : Optional[int] , lowercase_ : int , lowercase_ : List[Any] , lowercase_ : List[Any] , lowercase_ : Union[str, Any] ):
if pipeline_test_casse_name == "ZeroShotClassificationPipelineTests":
# Get `tokenizer does not have a padding token` error for both fast/slow tokenizers.
# `OpenAIGPTConfig` was never used in pipeline tests, either because of a missing checkpoint or because a
# tiny config could not be created.
return True
return False
def _snake_case ( self : Optional[int] , lowercase_ : List[Any] , lowercase_ : Optional[int] , lowercase_ : List[str]=False ):
snake_case_ : Dict = super()._prepare_for_class(lowercase_ , lowercase_ , return_labels=lowercase_ )
if return_labels:
if model_class.__name__ == "OpenAIGPTDoubleHeadsModel":
snake_case_ : List[str] = torch.zeros(
(self.model_tester.batch_size, self.model_tester.num_choices, self.model_tester.seq_length) , dtype=torch.long , device=lowercase_ , )
snake_case_ : int = inputs_dict['''labels''']
snake_case_ : Optional[Any] = inputs_dict['''labels''']
snake_case_ : int = torch.zeros(
(self.model_tester.batch_size, self.model_tester.num_choices) , dtype=torch.long , device=lowercase_ , )
snake_case_ : Tuple = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=lowercase_ )
return inputs_dict
def _snake_case ( self : Any ):
snake_case_ : List[str] = OpenAIGPTModelTester(self )
snake_case_ : Dict = ConfigTester(self , config_class=lowercase_ , n_embd=37 )
def _snake_case ( self : List[str] ):
self.config_tester.run_common_tests()
def _snake_case ( self : Optional[Any] ):
snake_case_ : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_openai_gpt_model(*lowercase_ )
def _snake_case ( self : List[str] ):
snake_case_ : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_lm_head_model(*lowercase_ )
def _snake_case ( self : int ):
snake_case_ : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_double_lm_head_model(*lowercase_ )
def _snake_case ( self : List[str] ):
snake_case_ : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_openai_gpt_for_sequence_classification(*lowercase_ )
@slow
def _snake_case ( self : Dict ):
for model_name in OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
snake_case_ : Optional[Any] = OpenAIGPTModel.from_pretrained(lowercase_ )
self.assertIsNotNone(lowercase_ )
@require_torch
class _UpperCAmelCase ( unittest.TestCase):
@slow
def _snake_case ( self : Optional[int] ):
snake_case_ : Optional[Any] = OpenAIGPTLMHeadModel.from_pretrained('''openai-gpt''' )
model.to(lowercase_ )
snake_case_ : List[str] = torch.tensor([[481, 4735, 544]] , dtype=torch.long , device=lowercase_ ) # the president is
snake_case_ : List[Any] = [
481,
4735,
544,
246,
963,
870,
762,
239,
244,
40477,
244,
249,
719,
881,
487,
544,
240,
244,
603,
481,
] # the president is a very good man. " \n " i\'m sure he is, " said the
snake_case_ : Optional[Any] = model.generate(lowercase_ , do_sample=lowercase_ )
self.assertListEqual(output_ids[0].tolist() , lowercase_ )
| 264 | 0 |
from __future__ import annotations
RADIX = 10


def radix_sort(list_of_ints: list[int]) -> list[int]:
    placement = 1
    max_digit = max(list_of_ints)
    while placement <= max_digit:
        # declare and initialize empty buckets
        buckets: list[list] = [[] for _ in range(RADIX)]
        # split list_of_ints between the buckets by the current digit
        for i in list_of_ints:
            tmp = int((i / placement) % RADIX)
            buckets[tmp].append(i)
        # put each bucket's contents back into list_of_ints
        a = 0
        for b in range(RADIX):
            for i in buckets[b]:
                list_of_ints[a] = i
                a += 1
        # move to the next digit
        placement *= RADIX
    return list_of_ints
if __name__ == "__main__":
import doctest
doctest.testmod()
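    # quick demonstration on a small sample (hypothetical input)
    print(radix_sort([170, 45, 75, 90, 802, 24, 2, 66]))  # -> [2, 24, 45, 66, 75, 90, 170, 802]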
| 175 |
"""simple docstring"""
from typing import Dict, List, Optional, Tuple, Union
import torch
from ...models import AutoencoderKL, Transformer2DModel
from ...schedulers import KarrasDiffusionSchedulers
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
class _UpperCAmelCase ( lowerCAmelCase__):
def __init__( self : Any , lowercase_ : Transformer2DModel , lowercase_ : AutoencoderKL , lowercase_ : KarrasDiffusionSchedulers , lowercase_ : Optional[Dict[int, str]] = None , ):
super().__init__()
self.register_modules(transformer=lowercase_ , vae=lowercase_ , scheduler=lowercase_ )
# create an ImageNet label -> id dictionary for easier use
snake_case_ : Tuple = {}
if id2label is not None:
for key, value in id2label.items():
for label in value.split(''',''' ):
snake_case_ : str = int(lowercase_ )
snake_case_ : Any = dict(sorted(self.labels.items() ) )
def _snake_case ( self : List[Any] , lowercase_ : Union[str, List[str]] ):
if not isinstance(lowercase_ , lowercase_ ):
snake_case_ : Tuple = list(lowercase_ )
for l in label:
if l not in self.labels:
raise ValueError(
f"{l} does not exist. Please make sure to select one of the following labels: \n {self.labels}." )
return [self.labels[l] for l in label]
@torch.no_grad()
def __call__( self : Optional[int] , lowercase_ : List[int] , lowercase_ : float = 4.0 , lowercase_ : Optional[Union[torch.Generator, List[torch.Generator]]] = None , lowercase_ : int = 50 , lowercase_ : Optional[str] = "pil" , lowercase_ : bool = True , ):
snake_case_ : Any = len(lowercase_ )
snake_case_ : List[str] = self.transformer.config.sample_size
snake_case_ : Union[str, Any] = self.transformer.config.in_channels
snake_case_ : str = randn_tensor(
shape=(batch_size, latent_channels, latent_size, latent_size) , generator=lowercase_ , device=self.device , dtype=self.transformer.dtype , )
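# classifier-free guidance: process the conditional and unconditional branches in a single batched forward pass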
snake_case_ : Optional[Any] = torch.cat([latents] * 2 ) if guidance_scale > 1 else latents
snake_case_ : Optional[int] = torch.tensor(lowercase_ , device=self.device ).reshape(-1 )
snake_case_ : Dict = torch.tensor([1000] * batch_size , device=self.device )
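# class id 1000 serves as the learned null (unconditional) class embedding for the guidance branch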
snake_case_ : Tuple = torch.cat([class_labels, class_null] , 0 ) if guidance_scale > 1 else class_labels
# set step values
self.scheduler.set_timesteps(lowercase_ )
for t in self.progress_bar(self.scheduler.timesteps ):
if guidance_scale > 1:
snake_case_ : List[Any] = latent_model_input[: len(lowercase_ ) // 2]
snake_case_ : Union[str, Any] = torch.cat([half, half] , dim=0 )
snake_case_ : Optional[Any] = self.scheduler.scale_model_input(lowercase_ , lowercase_ )
snake_case_ : int = t
if not torch.is_tensor(lowercase_ ):
# TODO: this requires sync between CPU and GPU. So try to pass timesteps as tensors if you can
# This would be a good case for the `match` statement (Python 3.10+)
snake_case_ : Tuple = latent_model_input.device.type == '''mps'''
if isinstance(lowercase_ , lowercase_ ):
snake_case_ : List[str] = torch.float32 if is_mps else torch.float64
else:
snake_case_ : Optional[int] = torch.int32 if is_mps else torch.int64
snake_case_ : List[Any] = torch.tensor([timesteps] , dtype=lowercase_ , device=latent_model_input.device )
elif len(timesteps.shape ) == 0:
snake_case_ : str = timesteps[None].to(latent_model_input.device )
# broadcast to batch dimension in a way that's compatible with ONNX/Core ML
snake_case_ : Tuple = timesteps.expand(latent_model_input.shape[0] )
# predict noise model_output
snake_case_ : List[Any] = self.transformer(
lowercase_ , timestep=lowercase_ , class_labels=lowercase_ ).sample
# perform guidance
if guidance_scale > 1:
snake_case_, snake_case_ : Dict = noise_pred[:, :latent_channels], noise_pred[:, latent_channels:]
snake_case_, snake_case_ : Any = torch.split(lowercase_ , len(lowercase_ ) // 2 , dim=0 )
snake_case_ : int = uncond_eps + guidance_scale * (cond_eps - uncond_eps)
snake_case_ : str = torch.cat([half_eps, half_eps] , dim=0 )
snake_case_ : List[Any] = torch.cat([eps, rest] , dim=1 )
# learned sigma
if self.transformer.config.out_channels // 2 == latent_channels:
snake_case_, snake_case_ : Optional[Any] = torch.split(lowercase_ , lowercase_ , dim=1 )
else:
snake_case_ : List[str] = noise_pred
# compute previous image: x_t -> x_t-1
snake_case_ : int = self.scheduler.step(lowercase_ , lowercase_ , lowercase_ ).prev_sample
if guidance_scale > 1:
snake_case_, snake_case_ : Optional[Any] = latent_model_input.chunk(2 , dim=0 )
else:
snake_case_ : Dict = latent_model_input
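# undo the scaling applied to latents during training before handing them to the VAE decoder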
snake_case_ : Union[str, Any] = 1 / self.vae.config.scaling_factor * latents
snake_case_ : Tuple = self.vae.decode(lowercase_ ).sample
snake_case_ : str = (samples / 2 + 0.5).clamp(0 , 1 )
# we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
snake_case_ : Union[str, Any] = samples.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
if output_type == "pil":
snake_case_ : Union[str, Any] = self.numpy_to_pil(lowercase_ )
if not return_dict:
return (samples,)
return ImagePipelineOutput(images=lowercase_ )
| 264 | 0 |
'''simple docstring'''
from __future__ import annotations
def max_sum_in_array(array: list[int], k: int) -> int:
    """Returns the maximum sum of k consecutive elements of the array."""
    if len(array) < k or k < 0:
        raise ValueError("Invalid Input")
    max_sum = current_sum = sum(array[:k])
    for i in range(len(array) - k):
        current_sum = current_sum - array[i] + array[i + k]
        max_sum = max(max_sum, current_sum)
    return max_sum


if __name__ == "__main__":
    from doctest import testmod
    from random import randint

    testmod()
    array = [randint(-1000, 1000) for i in range(100)]
    k = randint(0, 110)
    print(f"The maximum sum of {k} consecutive elements is {max_sum_in_array(array, k)}")
| 215 |
"""simple docstring"""
import copy
import os
import cv2
import numpy as np
from matplotlib import pyplot as plt
class _UpperCAmelCase :
def __init__( self : List[Any] ):
snake_case_ : List[str] = ''''''
snake_case_ : Tuple = ''''''
snake_case_ : int = []
snake_case_ : Optional[int] = 0
snake_case_ : Optional[Any] = 256
snake_case_ : Tuple = 0
snake_case_ : Tuple = 0
snake_case_ : Optional[Any] = 0
snake_case_ : Any = 0
def _snake_case ( self : Optional[Any] , lowercase_ : List[Any] ):
snake_case_ : List[Any] = cv2.imread(lowercase_ , 0 )
snake_case_ : Tuple = copy.deepcopy(self.img )
snake_case_, snake_case_, snake_case_ : List[Any] = plt.hist(self.img.ravel() , 256 , [0, 256] , label='''x''' )
snake_case_ : str = np.sum(lowercase_ )
for i in range(len(lowercase_ ) ):
snake_case_ : Optional[Any] = x[i] / self.k
self.sk += prk
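# sk accumulates the cumulative distribution of pixel intensities; (L - 1) * sk maps it onto the output gray-level range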
snake_case_ : Any = (self.L - 1) * self.sk
if self.rem != 0:
snake_case_ : Dict = int(last % last )
snake_case_ : Union[str, Any] = int(last + 1 if self.rem >= 0.5 else last )
self.last_list.append(lowercase_ )
snake_case_ : int = int(np.ma.count(self.img ) / self.img[1].size )
snake_case_ : Tuple = self.img[1].size
for i in range(self.number_of_cols ):
for j in range(self.number_of_rows ):
snake_case_ : Union[str, Any] = self.img[j][i]
if num != self.last_list[num]:
snake_case_ : List[str] = self.last_list[num]
cv2.imwrite('''output_data/output.jpg''' , self.img )
def _snake_case ( self : Tuple ):
plt.hist(self.img.ravel() , 256 , [0, 256] )
def _snake_case ( self : int ):
cv2.imshow('''Output-Image''' , self.img )
cv2.imshow('''Input-Image''' , self.original_image )
cv2.waitKey(5000 )
cv2.destroyAllWindows()
if __name__ == "__main__":
lowercase__ : Any = os.path.join(os.path.basename(__file__), '''image_data/input.jpg''')
lowercase__ : Any = ConstantStretch()
stretcher.stretch(file_path)
stretcher.plot_histogram()
stretcher.show_image()
| 264 | 0 |
'''simple docstring'''
from .glue import glue_convert_examples_to_features, glue_output_modes, glue_processors, glue_tasks_num_labels
from .squad import SquadExample, SquadFeatures, SquadV1Processor, SquadV2Processor, squad_convert_examples_to_features
from .utils import DataProcessor, InputExample, InputFeatures, SingleSentenceClassificationProcessor
from .xnli import xnli_output_modes, xnli_processors, xnli_tasks_num_labels
| 70 |
"""simple docstring"""
import shutil
import tempfile
import unittest
from unittest.mock import patch
from transformers import (
DefaultFlowCallback,
IntervalStrategy,
PrinterCallback,
ProgressCallback,
Trainer,
TrainerCallback,
TrainingArguments,
is_torch_available,
)
from transformers.testing_utils import require_torch
if is_torch_available():
from transformers.trainer import DEFAULT_CALLBACKS
from .test_trainer import RegressionDataset, RegressionModelConfig, RegressionPreTrainedModel
class _UpperCAmelCase ( lowerCAmelCase__):
def __init__( self : Optional[int] ):
snake_case_ : str = []
def _snake_case ( self : List[Any] , lowercase_ : Any , lowercase_ : Union[str, Any] , lowercase_ : List[str] , **lowercase_ : Tuple ):
self.events.append('''on_init_end''' )
def _snake_case ( self : List[Any] , lowercase_ : str , lowercase_ : Optional[int] , lowercase_ : List[str] , **lowercase_ : List[str] ):
self.events.append('''on_train_begin''' )
def _snake_case ( self : Any , lowercase_ : List[str] , lowercase_ : Tuple , lowercase_ : List[Any] , **lowercase_ : Optional[int] ):
self.events.append('''on_train_end''' )
def _snake_case ( self : str , lowercase_ : Optional[int] , lowercase_ : int , lowercase_ : Optional[Any] , **lowercase_ : List[Any] ):
self.events.append('''on_epoch_begin''' )
def _snake_case ( self : Tuple , lowercase_ : List[str] , lowercase_ : Dict , lowercase_ : Union[str, Any] , **lowercase_ : Optional[Any] ):
self.events.append('''on_epoch_end''' )
def _snake_case ( self : List[str] , lowercase_ : Optional[Any] , lowercase_ : Optional[Any] , lowercase_ : int , **lowercase_ : Optional[Any] ):
self.events.append('''on_step_begin''' )
def _snake_case ( self : int , lowercase_ : int , lowercase_ : Union[str, Any] , lowercase_ : List[Any] , **lowercase_ : List[str] ):
self.events.append('''on_step_end''' )
def _snake_case ( self : str , lowercase_ : int , lowercase_ : Dict , lowercase_ : List[str] , **lowercase_ : List[str] ):
self.events.append('''on_evaluate''' )
def _snake_case ( self : Dict , lowercase_ : Union[str, Any] , lowercase_ : Any , lowercase_ : List[Any] , **lowercase_ : str ):
self.events.append('''on_predict''' )
def _snake_case ( self : List[Any] , lowercase_ : Union[str, Any] , lowercase_ : List[Any] , lowercase_ : int , **lowercase_ : Union[str, Any] ):
self.events.append('''on_save''' )
def _snake_case ( self : str , lowercase_ : Tuple , lowercase_ : Optional[int] , lowercase_ : List[str] , **lowercase_ : Any ):
self.events.append('''on_log''' )
def _snake_case ( self : Dict , lowercase_ : Optional[int] , lowercase_ : List[str] , lowercase_ : Union[str, Any] , **lowercase_ : Optional[int] ):
self.events.append('''on_prediction_step''' )
@require_torch
class _UpperCAmelCase ( unittest.TestCase):
def _snake_case ( self : List[str] ):
snake_case_ : Tuple = tempfile.mkdtemp()
def _snake_case ( self : Tuple ):
shutil.rmtree(self.output_dir )
def _snake_case ( self : int , lowercase_ : Union[str, Any]=0 , lowercase_ : Dict=0 , lowercase_ : List[str]=64 , lowercase_ : Union[str, Any]=64 , lowercase_ : Union[str, Any]=None , lowercase_ : Any=False , **lowercase_ : List[Any] ):
# disable_tqdm in TrainingArguments has a flaky default since it depends on the level of logging. We make sure
# its set to False since the tests later on depend on its value.
snake_case_ : int = RegressionDataset(length=lowercase_ )
snake_case_ : Any = RegressionDataset(length=lowercase_ )
snake_case_ : int = RegressionModelConfig(a=lowercase_ , b=lowercase_ )
snake_case_ : Tuple = RegressionPreTrainedModel(lowercase_ )
snake_case_ : Any = TrainingArguments(self.output_dir , disable_tqdm=lowercase_ , report_to=[] , **lowercase_ )
return Trainer(
lowercase_ , lowercase_ , train_dataset=lowercase_ , eval_dataset=lowercase_ , callbacks=lowercase_ , )
def _snake_case ( self : Optional[int] , lowercase_ : Any , lowercase_ : List[Any] ):
self.assertEqual(len(lowercase_ ) , len(lowercase_ ) )
# Order doesn't matter
snake_case_ : Any = sorted(lowercase_ , key=lambda lowercase_ : cb.__name__ if isinstance(lowercase_ , lowercase_ ) else cb.__class__.__name__ )
snake_case_ : List[str] = sorted(lowercase_ , key=lambda lowercase_ : cb.__name__ if isinstance(lowercase_ , lowercase_ ) else cb.__class__.__name__ )
for cba, cba in zip(lowercase_ , lowercase_ ):
if isinstance(lowercase_ , lowercase_ ) and isinstance(lowercase_ , lowercase_ ):
self.assertEqual(lowercase_ , lowercase_ )
elif isinstance(lowercase_ , lowercase_ ) and not isinstance(lowercase_ , lowercase_ ):
self.assertEqual(lowercase_ , cba.__class__ )
elif not isinstance(lowercase_ , lowercase_ ) and isinstance(lowercase_ , lowercase_ ):
self.assertEqual(cba.__class__ , lowercase_ )
else:
self.assertEqual(lowercase_ , lowercase_ )
def _snake_case ( self : Optional[Any] , lowercase_ : Tuple ):
snake_case_ : Tuple = ['''on_init_end''', '''on_train_begin''']
snake_case_ : List[Any] = 0
snake_case_ : Union[str, Any] = len(trainer.get_eval_dataloader() )
snake_case_ : List[Any] = ['''on_prediction_step'''] * len(trainer.get_eval_dataloader() ) + ['''on_log''', '''on_evaluate''']
for _ in range(trainer.state.num_train_epochs ):
expected_events.append('''on_epoch_begin''' )
for _ in range(lowercase_ ):
step += 1
expected_events += ["on_step_begin", "on_step_end"]
if step % trainer.args.logging_steps == 0:
expected_events.append('''on_log''' )
if trainer.args.evaluation_strategy == IntervalStrategy.STEPS and step % trainer.args.eval_steps == 0:
expected_events += evaluation_events.copy()
if step % trainer.args.save_steps == 0:
expected_events.append('''on_save''' )
expected_events.append('''on_epoch_end''' )
if trainer.args.evaluation_strategy == IntervalStrategy.EPOCH:
expected_events += evaluation_events.copy()
expected_events += ["on_log", "on_train_end"]
return expected_events
def _snake_case ( self : List[str] ):
snake_case_ : Union[str, Any] = self.get_trainer()
snake_case_ : Dict = DEFAULT_CALLBACKS.copy() + [ProgressCallback]
self.check_callbacks_equality(trainer.callback_handler.callbacks , lowercase_ )
# Callbacks passed at init are added to the default callbacks
snake_case_ : Optional[Any] = self.get_trainer(callbacks=[MyTestTrainerCallback] )
expected_callbacks.append(lowercase_ )
self.check_callbacks_equality(trainer.callback_handler.callbacks , lowercase_ )
# TrainingArguments.disable_tqdm controls if use ProgressCallback or PrinterCallback
snake_case_ : Optional[int] = self.get_trainer(disable_tqdm=lowercase_ )
snake_case_ : List[Any] = DEFAULT_CALLBACKS.copy() + [PrinterCallback]
self.check_callbacks_equality(trainer.callback_handler.callbacks , lowercase_ )
def _snake_case ( self : int ):
snake_case_ : int = DEFAULT_CALLBACKS.copy() + [ProgressCallback]
snake_case_ : List[Any] = self.get_trainer()
# We can add, pop, or remove by class name
trainer.remove_callback(lowercase_ )
expected_callbacks.remove(lowercase_ )
self.check_callbacks_equality(trainer.callback_handler.callbacks , lowercase_ )
snake_case_ : Dict = self.get_trainer()
snake_case_ : Optional[int] = trainer.pop_callback(lowercase_ )
self.assertEqual(cb.__class__ , lowercase_ )
self.check_callbacks_equality(trainer.callback_handler.callbacks , lowercase_ )
trainer.add_callback(lowercase_ )
expected_callbacks.insert(0 , lowercase_ )
self.check_callbacks_equality(trainer.callback_handler.callbacks , lowercase_ )
# We can also add, pop, or remove by instance
snake_case_ : Optional[int] = self.get_trainer()
snake_case_ : List[Any] = trainer.callback_handler.callbacks[0]
trainer.remove_callback(lowercase_ )
expected_callbacks.remove(lowercase_ )
self.check_callbacks_equality(trainer.callback_handler.callbacks , lowercase_ )
snake_case_ : List[Any] = self.get_trainer()
snake_case_ : Optional[int] = trainer.callback_handler.callbacks[0]
snake_case_ : Optional[Any] = trainer.pop_callback(lowercase_ )
self.assertEqual(lowercase_ , lowercase_ )
self.check_callbacks_equality(trainer.callback_handler.callbacks , lowercase_ )
trainer.add_callback(lowercase_ )
expected_callbacks.insert(0 , lowercase_ )
self.check_callbacks_equality(trainer.callback_handler.callbacks , lowercase_ )
def _snake_case ( self : List[Any] ):
import warnings
# XXX: for now ignore scatter_gather warnings in this test since it's not relevant to what's being tested
warnings.simplefilter(action='''ignore''' , category=lowercase_ )
snake_case_ : int = self.get_trainer(callbacks=[MyTestTrainerCallback] )
trainer.train()
snake_case_ : Union[str, Any] = trainer.callback_handler.callbacks[-2].events
self.assertEqual(lowercase_ , self.get_expected_events(lowercase_ ) )
# Independent log/save/eval
snake_case_ : int = self.get_trainer(callbacks=[MyTestTrainerCallback] , logging_steps=5 )
trainer.train()
snake_case_ : str = trainer.callback_handler.callbacks[-2].events
self.assertEqual(lowercase_ , self.get_expected_events(lowercase_ ) )
snake_case_ : List[Any] = self.get_trainer(callbacks=[MyTestTrainerCallback] , save_steps=5 )
trainer.train()
snake_case_ : int = trainer.callback_handler.callbacks[-2].events
self.assertEqual(lowercase_ , self.get_expected_events(lowercase_ ) )
snake_case_ : List[Any] = self.get_trainer(callbacks=[MyTestTrainerCallback] , eval_steps=5 , evaluation_strategy='''steps''' )
trainer.train()
snake_case_ : Union[str, Any] = trainer.callback_handler.callbacks[-2].events
self.assertEqual(lowercase_ , self.get_expected_events(lowercase_ ) )
snake_case_ : Union[str, Any] = self.get_trainer(callbacks=[MyTestTrainerCallback] , evaluation_strategy='''epoch''' )
trainer.train()
snake_case_ : Dict = trainer.callback_handler.callbacks[-2].events
self.assertEqual(lowercase_ , self.get_expected_events(lowercase_ ) )
# A bit of everything
snake_case_ : str = self.get_trainer(
callbacks=[MyTestTrainerCallback] , logging_steps=3 , save_steps=10 , eval_steps=5 , evaluation_strategy='''steps''' , )
trainer.train()
snake_case_ : str = trainer.callback_handler.callbacks[-2].events
self.assertEqual(lowercase_ , self.get_expected_events(lowercase_ ) )
# warning should be emitted for duplicated callbacks
with patch('''transformers.trainer_callback.logger.warning''' ) as warn_mock:
snake_case_ : Dict = self.get_trainer(
callbacks=[MyTestTrainerCallback, MyTestTrainerCallback] , )
assert str(lowercase_ ) in warn_mock.call_args[0][0]
| 264 | 0 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ....utils import _LazyModule
_a = {'''tokenization_tapex''': ['''TapexTokenizer''']}
if TYPE_CHECKING:
from .tokenization_tapex import TapexTokenizer
else:
import sys
_a = _LazyModule(__name__, globals()['__file__'], _import_structure)
| 61 |
"""simple docstring"""
import numpy as np
def tangent_hyperbolic(vector: np.ndarray) -> np.ndarray:
    """Implements the tanh activation as (2 / (1 + e^(-2x))) - 1."""
    return (2 / (1 + np.exp(-2 * vector))) - 1
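# example: tangent_hyperbolic(np.array([1.0, 5.0, -0.67])) ≈ [0.7616, 0.9999, -0.5850]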
if __name__ == "__main__":
import doctest
doctest.testmod()
| 264 | 0 |
from __future__ import annotations
import unittest
from transformers import LEDConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFLEDForConditionalGeneration, TFLEDModel
@require_tf
class A__:
"""simple docstring"""
_A : Tuple = LEDConfig
_A : List[Any] = {}
_A : Tuple = """gelu"""
def __init__( self , _lowercase , _lowercase=13 , _lowercase=7 , _lowercase=True , _lowercase=False , _lowercase=99 , _lowercase=32 , _lowercase=2 , _lowercase=4 , _lowercase=37 , _lowercase=0.1 , _lowercase=0.1 , _lowercase=20 , _lowercase=2 , _lowercase=1 , _lowercase=0 , _lowercase=4 , ) -> Tuple:
a_ : List[str] = parent
a_ : Union[str, Any] = batch_size
a_ : Dict = seq_length
a_ : Optional[Any] = is_training
a_ : List[Any] = use_labels
a_ : Any = vocab_size
a_ : str = hidden_size
a_ : Union[str, Any] = num_hidden_layers
a_ : List[Any] = num_attention_heads
a_ : List[str] = intermediate_size
a_ : Union[str, Any] = hidden_dropout_prob
a_ : int = attention_probs_dropout_prob
a_ : Union[str, Any] = max_position_embeddings
a_ : List[Any] = eos_token_id
a_ : Any = pad_token_id
a_ : Tuple = bos_token_id
a_ : Union[str, Any] = attention_window
# `ModelTesterMixin.test_attention_outputs` is expecting attention tensors to be of size
# [num_attention_heads, encoder_seq_length, encoder_key_length], but TFLongformerSelfAttention
# returns attention of shape [num_attention_heads, encoder_seq_length, self.attention_window + 1]
# because its local attention only attends to `self.attention_window` and one before and one after
a_ : Optional[int] = self.attention_window + 2
# because of padding `encoder_seq_length`, is different from `seq_length`. Relevant for
# the `test_attention_outputs` and `test_hidden_states_output` tests
a_ : int = (
self.seq_length + (self.attention_window - self.seq_length % self.attention_window) % self.attention_window
)
def UpperCamelCase__ ( self ) -> Tuple:
a_ : Any = ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size )
a_ : List[str] = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size ) , 1 )
a_ : Dict = tf.concat([input_ids, eos_tensor] , axis=1 )
a_ : Dict = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
a_ : str = self.config_cls(
vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_ids=[2] , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.pad_token_id , attention_window=self.attention_window , **self.config_updates , )
a_ : int = prepare_led_inputs_dict(lowercase_ , lowercase_ , lowercase_ )
a_ : Optional[int] = tf.concat(
[tf.zeros_like(lowercase_ )[:, :-1], tf.ones_like(lowercase_ )[:, -1:]] , axis=-1 , )
a_ : List[str] = global_attention_mask
return config, inputs_dict
def UpperCamelCase__ ( self , _lowercase , _lowercase ) -> Union[str, Any]:
a_ : List[str] = TFLEDModel(config=lowercase_ ).get_decoder()
a_ : str = inputs_dict['''input_ids''']
a_ : Optional[int] = input_ids[:1, :]
a_ : List[str] = inputs_dict['''attention_mask'''][:1, :]
a_ : List[Any] = 1
# first forward pass
a_ : str = model(lowercase_ , attention_mask=lowercase_ , use_cache=lowercase_ )
a_ : Any = outputs.to_tuple()
# create hypothetical next token and extent to next_input_ids
a_ : Union[str, Any] = ids_tensor((self.batch_size, 3) , config.vocab_size )
a_ : Union[str, Any] = tf.cast(ids_tensor((self.batch_size, 3) , 2 ) , tf.int8 )
# append to next input_ids and
a_ : int = tf.concat([input_ids, next_tokens] , axis=-1 )
a_ : Any = tf.concat([attention_mask, next_attn_mask] , axis=-1 )
a_ : int = model(lowercase_ , attention_mask=lowercase_ )[0]
a_ : Any = model(lowercase_ , attention_mask=lowercase_ , past_key_values=lowercase_ )[0]
self.parent.assertEqual(next_tokens.shape[1] , output_from_past.shape[1] )
# select random slice
a_ : Any = int(ids_tensor((1,) , output_from_past.shape[-1] ) )
a_ : Union[str, Any] = output_from_no_past[:, -3:, random_slice_idx]
a_ : List[str] = output_from_past[:, :, random_slice_idx]
# test that outputs are equal for slice
tf.debugging.assert_near(lowercase_ , lowercase_ , rtol=1e-3 )
def _UpperCAmelCase ( a__ , a__ , a__ , a__=None , a__=None , a__=None , a__=None , ):
'''simple docstring'''
if attention_mask is None:
a_ : Dict = tf.cast(tf.math.not_equal(_a , config.pad_token_id) , tf.int8)
if decoder_attention_mask is None:
a_ : Optional[int] = tf.concat(
[
tf.ones(decoder_input_ids[:, :1].shape , dtype=tf.int8),
tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:] , config.pad_token_id) , tf.int8),
] , axis=-1 , )
if head_mask is None:
a_ : str = tf.ones((config.encoder_layers, config.encoder_attention_heads))
if decoder_head_mask is None:
a_ : Optional[int] = tf.ones((config.decoder_layers, config.decoder_attention_heads))
return {
"input_ids": input_ids,
"attention_mask": attention_mask,
"decoder_input_ids": decoder_input_ids,
"decoder_attention_mask": decoder_attention_mask,
"head_mask": head_mask,
"decoder_head_mask": decoder_head_mask,
}
@require_tf
class A__(lowerCAmelCase__, lowerCAmelCase__, unittest.TestCase ):
"""simple docstring"""
_A : Optional[Any] = (TFLEDForConditionalGeneration, TFLEDModel) if is_tf_available() else ()
_A : Union[str, Any] = (TFLEDForConditionalGeneration,) if is_tf_available() else ()
_A : Optional[int] = (
{
"""conversational""": TFLEDForConditionalGeneration,
"""feature-extraction""": TFLEDModel,
"""summarization""": TFLEDForConditionalGeneration,
"""text2text-generation""": TFLEDForConditionalGeneration,
"""translation""": TFLEDForConditionalGeneration,
}
if is_tf_available()
else {}
)
_A : Any = True
_A : Tuple = False
_A : Optional[Any] = False
_A : Optional[int] = False
def UpperCamelCase__ ( self ) -> Dict:
a_ : Union[str, Any] = TFLEDModelTester(self )
a_ : Tuple = ConfigTester(self , config_class=lowercase_ )
def UpperCamelCase__ ( self ) -> int:
self.config_tester.run_common_tests()
def UpperCamelCase__ ( self ) -> Tuple:
a_ : str = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.check_decoder_model_past_large_inputs(*lowercase_ )
def UpperCamelCase__ ( self ) -> Union[str, Any]:
a_ : Dict = self.model_tester.prepare_config_and_inputs_for_common()
a_ : Union[str, Any] = tf.zeros_like(inputs_dict["""attention_mask"""] )
a_ : Dict = 2
a_ : List[Any] = tf.where(
tf.range(self.model_tester.seq_length )[None, :] < num_global_attn_indices , 1 , inputs_dict["""global_attention_mask"""] , )
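# mark the first num_global_attn_indices positions of every sequence as global attention tokens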
a_ : Tuple = True
a_ : List[Any] = self.model_tester.seq_length
a_ : int = self.model_tester.encoder_seq_length
def check_decoder_attentions_output(_lowercase ):
a_ : str = outputs.decoder_attentions
self.assertEqual(len(lowercase_ ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(decoder_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_length, seq_length] , )
def check_encoder_attentions_output(_lowercase ):
a_ : Any = [t.numpy() for t in outputs.encoder_attentions]
a_ : Any = [t.numpy() for t in outputs.encoder_global_attentions]
self.assertEqual(len(lowercase_ ) , self.model_tester.num_hidden_layers )
self.assertEqual(len(lowercase_ ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_length, seq_length] , )
self.assertListEqual(
list(global_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, encoder_seq_length, num_global_attn_indices] , )
for model_class in self.all_model_classes:
a_ : Optional[Any] = True
a_ : Tuple = False
a_ : int = False
a_ : Optional[int] = model_class(lowercase_ )
a_ : str = model(self._prepare_for_class(lowercase_ , lowercase_ ) )
a_ : Any = len(lowercase_ )
self.assertEqual(config.output_hidden_states , lowercase_ )
check_encoder_attentions_output(lowercase_ )
if self.is_encoder_decoder:
a_ : str = model_class(lowercase_ )
a_ : Any = model(self._prepare_for_class(lowercase_ , lowercase_ ) )
self.assertEqual(config.output_hidden_states , lowercase_ )
check_decoder_attentions_output(lowercase_ )
# Check that output attentions can also be changed via the config
del inputs_dict["output_attentions"]
a_ : List[Any] = True
a_ : Any = model_class(lowercase_ )
a_ : Tuple = model(self._prepare_for_class(lowercase_ , lowercase_ ) )
self.assertEqual(config.output_hidden_states , lowercase_ )
check_encoder_attentions_output(lowercase_ )
# Check attention is always last and order is fine
a_ : Optional[Any] = True
a_ : List[str] = True
a_ : int = model_class(lowercase_ )
a_ : str = model(self._prepare_for_class(lowercase_ , lowercase_ ) )
self.assertEqual(out_len + (2 if self.is_encoder_decoder else 1) , len(lowercase_ ) )
self.assertEqual(model.config.output_hidden_states , lowercase_ )
check_encoder_attentions_output(lowercase_ )
@unittest.skip("""LED keeps using potentially symbolic tensors in conditionals and breaks tracing.""" )
def UpperCamelCase__ ( self ) -> Optional[int]:
pass
def UpperCamelCase__ ( self ) -> Optional[int]:
# TODO: Head-masking not yet implement
pass
def _long_tensor(tok_lst):
    '''simple docstring'''
    return tf.constant(tok_lst, dtype=tf.int32)
__snake_case : Optional[int] = 1e-4
@slow
@require_tf
class A__(unittest.TestCase ):
"""simple docstring"""
def UpperCamelCase__ ( self ) -> str:
a_ : Tuple = TFLEDForConditionalGeneration.from_pretrained("""allenai/led-base-16384""" ).led
# change to intended input here
a_ : str = _long_tensor([512 * [0, 31_414, 232, 328, 740, 1_140, 12_695, 69]] )
a_ : str = _long_tensor([128 * [0, 31_414, 232, 328, 740, 1_140, 12_695, 69]] )
a_ : Optional[Any] = prepare_led_inputs_dict(model.config , lowercase_ , lowercase_ )
a_ : Optional[int] = model(**lowercase_ )[0]
a_ : Optional[Any] = (1, 1_024, 768)
self.assertEqual(output.shape , lowercase_ )
# change to expected output here
a_ : Optional[Any] = tf.convert_to_tensor(
[[2.3_0_5_0, 2.8_2_7_9, 0.6_5_3_1], [-1.8_4_5_7, -0.1_4_5_5, -3.5_6_6_1], [-1.0_1_8_6, 0.4_5_8_6, -2.2_0_4_3]] , )
tf.debugging.assert_near(output[:, :3, :3] , lowercase_ , atol=1e-3 )
def UpperCamelCase__ ( self ) -> List[str]:
a_ : int = TFLEDForConditionalGeneration.from_pretrained("""allenai/led-base-16384""" )
# change to intended input here
a_ : Any = _long_tensor([512 * [0, 31_414, 232, 328, 740, 1_140, 12_695, 69]] )
a_ : Optional[int] = _long_tensor([128 * [0, 31_414, 232, 328, 740, 1_140, 12_695, 69]] )
a_ : Optional[int] = prepare_led_inputs_dict(model.config , lowercase_ , lowercase_ )
a_ : Tuple = model(**lowercase_ )[0]
a_ : Optional[int] = (1, 1_024, model.config.vocab_size)
self.assertEqual(output.shape , lowercase_ )
# change to expected output here
a_ : Any = tf.convert_to_tensor(
[[3_3.6_5_0_7, 6.4_5_7_2, 1_6.8_0_8_9], [5.8_7_3_9, -2.4_2_3_8, 1_1.2_9_0_2], [-3.2_1_3_9, -4.3_1_4_9, 4.2_7_8_3]] , )
tf.debugging.assert_near(output[:, :3, :3] , lowercase_ , atol=1e-3 , rtol=1e-3 )
| 248 |
"""simple docstring"""
import numpy as np
import torch
from torch.utils.data import Dataset
from utils import logger
class _UpperCAmelCase ( lowerCAmelCase__):
def __init__( self : Optional[int] , lowercase_ : str , lowercase_ : int ):
snake_case_ : Dict = params
snake_case_ : Union[str, Any] = np.array(lowercase_ )
snake_case_ : str = np.array([len(lowercase_ ) for t in data] )
self.check()
self.remove_long_sequences()
self.remove_empty_sequences()
self.remove_unknown_sequences()
self.check()
self.print_statistics()
def __getitem__( self : Dict , lowercase_ : Union[str, Any] ):
return (self.token_ids[index], self.lengths[index])
def __len__( self : List[Any] ):
return len(self.lengths )
def _snake_case ( self : Tuple ):
assert len(self.token_ids ) == len(self.lengths )
assert all(self.lengths[i] == len(self.token_ids[i] ) for i in range(len(self.lengths ) ) )
def _snake_case ( self : Tuple ):
snake_case_ : str = self.params.max_model_input_size
snake_case_ : Dict = self.lengths > max_len
logger.info(f"Splitting {sum(lowercase_ )} too long sequences." )
def divide_chunks(lowercase_ : Tuple , lowercase_ : Optional[Any] ):
return [l[i : i + n] for i in range(0 , len(lowercase_ ) , lowercase_ )]
snake_case_ : Tuple = []
snake_case_ : Any = []
if self.params.mlm:
snake_case_, snake_case_ : Union[str, Any] = self.params.special_tok_ids['''cls_token'''], self.params.special_tok_ids['''sep_token''']
else:
snake_case_, snake_case_ : Dict = self.params.special_tok_ids['''bos_token'''], self.params.special_tok_ids['''eos_token''']
for seq_, len_ in zip(self.token_ids , self.lengths ):
assert (seq_[0] == cls_id) and (seq_[-1] == sep_id), seq_
if len_ <= max_len:
new_tok_ids.append(seq_ )
new_lengths.append(len_ )
else:
snake_case_ : Any = []
for sub_s in divide_chunks(seq_ , max_len - 2 ):
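# max_len - 2 leaves room to re-attach the leading cls/bos and trailing sep/eos token on every chunk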
if sub_s[0] != cls_id:
snake_case_ : Dict = np.insert(lowercase_ , 0 , lowercase_ )
if sub_s[-1] != sep_id:
snake_case_ : Tuple = np.insert(lowercase_ , len(lowercase_ ) , lowercase_ )
assert len(lowercase_ ) <= max_len
assert (sub_s[0] == cls_id) and (sub_s[-1] == sep_id), sub_s
sub_seqs.append(lowercase_ )
new_tok_ids.extend(lowercase_ )
new_lengths.extend([len(lowercase_ ) for l in sub_seqs] )
snake_case_ : List[str] = np.array(lowercase_ )
snake_case_ : Optional[Any] = np.array(lowercase_ )
def _snake_case ( self : Optional[int] ):
snake_case_ : List[Any] = len(self )
snake_case_ : List[str] = self.lengths > 11
snake_case_ : Dict = self.token_ids[indices]
snake_case_ : Dict = self.lengths[indices]
snake_case_ : str = len(self )
logger.info(f"Remove {init_size - new_size} too short (<=11 tokens) sequences." )
def _snake_case ( self : Tuple ):
if "unk_token" not in self.params.special_tok_ids:
return
else:
snake_case_ : str = self.params.special_tok_ids['''unk_token''']
snake_case_ : str = len(self )
snake_case_ : int = np.array([np.count_nonzero(a == unk_token_id ) for a in self.token_ids] )
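# keep a sequence only when unknown tokens make up less than half of its length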
snake_case_ : str = (unk_occs / self.lengths) < 0.5
snake_case_ : Optional[Any] = self.token_ids[indices]
snake_case_ : Optional[int] = self.lengths[indices]
snake_case_ : Dict = len(self )
logger.info(f"Remove {init_size - new_size} sequences with a high level of unknown tokens (50%)." )
def _snake_case ( self : Dict ):
if not self.params.is_master:
return
logger.info(f"{len(self )} sequences" )
# data_len = sum(self.lengths)
# nb_unique_tokens = len(Counter(list(chain(*self.token_ids))))
# logger.info(f'{data_len} tokens ({nb_unique_tokens} unique)')
# unk_idx = self.params.special_tok_ids['unk_token']
# nb_unknown = sum([(t==unk_idx).sum() for t in self.token_ids])
# logger.info(f'{nb_unknown} unknown tokens (covering {100*nb_unknown/data_len:.2f}% of the data)')
def _snake_case ( self : List[str] , lowercase_ : Dict ):
snake_case_ : Optional[int] = [t[0] for t in batch]
snake_case_ : str = [t[1] for t in batch]
assert len(lowercase_ ) == len(lowercase_ )
# Max for paddings
snake_case_ : str = max(lowercase_ )
# Pad token ids
if self.params.mlm:
snake_case_ : Tuple = self.params.special_tok_ids['''pad_token''']
else:
snake_case_ : Dict = self.params.special_tok_ids['''unk_token''']
snake_case_ : Any = [list(t.astype(lowercase_ ) ) + [pad_idx] * (max_seq_len_ - len(lowercase_ )) for t in token_ids]
assert len(tk_ ) == len(lowercase_ )
assert all(len(lowercase_ ) == max_seq_len_ for t in tk_ )
snake_case_ : str = torch.tensor(tk_ ) # (bs, max_seq_len_)
snake_case_ : Optional[int] = torch.tensor(lowercase_ ) # (bs)
return tk_t, lg_t
| 264 | 0 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
lowercase__ : List[Any] = {
'''configuration_rag''': ['''RagConfig'''],
'''retrieval_rag''': ['''RagRetriever'''],
'''tokenization_rag''': ['''RagTokenizer'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase__ : Optional[Any] = [
'''RagModel''',
'''RagPreTrainedModel''',
'''RagSequenceForGeneration''',
'''RagTokenForGeneration''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase__ : Tuple = [
'''TFRagModel''',
'''TFRagPreTrainedModel''',
'''TFRagSequenceForGeneration''',
'''TFRagTokenForGeneration''',
]
if TYPE_CHECKING:
from .configuration_rag import RagConfig
from .retrieval_rag import RagRetriever
from .tokenization_rag import RagTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_rag import RagModel, RagPreTrainedModel, RagSequenceForGeneration, RagTokenForGeneration
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_rag import (
TFRagModel,
TFRagPreTrainedModel,
TFRagSequenceForGeneration,
TFRagTokenForGeneration,
)
else:
import sys
lowercase__ : List[str] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 338 |
"""simple docstring"""
from sympy import diff, lambdify, symbols
from sympy.functions import * # noqa: F403
def newton_raphson(
    function: str,
    starting_point: complex,
    variable: str = "x",
    precision: float = 10**-10,
    multiplicity: int = 1,
):
    """Find a root of `function` near `starting_point` via the Newton-Raphson method."""
    x = symbols(variable)
    func = lambdify(x, function)
    diff_function = lambdify(x, diff(function, x))
    prev_guess = starting_point
    while True:
        if diff_function(prev_guess) != 0:
            next_guess = prev_guess - multiplicity * func(prev_guess) / diff_function(prev_guess)
        else:
            raise ZeroDivisionError('''Could not find root''') from None
        # Precision is checked by comparing the difference of consecutive guesses
        if abs(next_guess - prev_guess) < precision:
            return next_guess
        prev_guess = next_guess
# Let's Execute
if __name__ == "__main__":
# Find root of trigonometric function
# Find value of pi
print(f'The root of sin(x) = 0 is {newton_raphson("sin(x)", 2)}')
# Find root of polynomial
# Find fourth Root of 5
print(f'The root of x**4 - 5 = 0 is {newton_raphson("x**4 -5", 0.4 +5j)}')
# Find value of e
print(
'''The root of log(y) - 1 = 0 is ''',
f'{newton_raphson("log(y) - 1", 2, variable="y")}',
)
# Exponential Roots
print(
'''The root of exp(x) - 1 = 0 is''',
f'{newton_raphson("exp(x) - 1", 10, precision=0.005)}',
)
# Find root of cos(x)
print(f'The root of cos(x) = 0 is {newton_raphson("cos(x)", 0)}')
| 264 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
lowerCAmelCase :Optional[int] = {
'''configuration_encodec''': [
'''ENCODEC_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''EncodecConfig''',
],
'''feature_extraction_encodec''': ['''EncodecFeatureExtractor'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase :str = [
'''ENCODEC_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''EncodecModel''',
'''EncodecPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_encodec import (
ENCODEC_PRETRAINED_CONFIG_ARCHIVE_MAP,
EncodecConfig,
)
from .feature_extraction_encodec import EncodecFeatureExtractor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_encodec import (
ENCODEC_PRETRAINED_MODEL_ARCHIVE_LIST,
EncodecModel,
EncodecPreTrainedModel,
)
else:
import sys
lowerCAmelCase :str = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 331 |
"""simple docstring"""
from __future__ import annotations
def shear_stress(
    stress: float,
    tangential_force: float,
    area: float,
) -> tuple[str, float]:
    if (stress, tangential_force, area).count(0) != 1:
        raise ValueError('''You cannot supply more or less than 2 values''')
    elif stress < 0:
        raise ValueError('''Stress cannot be negative''')
    elif tangential_force < 0:
        raise ValueError('''Tangential Force cannot be negative''')
    elif area < 0:
        raise ValueError('''Area cannot be negative''')
    elif stress == 0:
        return (
            "stress",
            tangential_force / area,
        )
    elif tangential_force == 0:
        return (
            "tangential_force",
            stress * area,
        )
    else:
        return (
            "area",
            tangential_force / stress,
        )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 264 | 0 |
"""simple docstring"""
import json
import sys
def format_json_to_md(results_json: str, output_md_file: str) -> None:
    '''simple docstring'''
    with open(results_json, encoding='utf-8') as f:
        results = json.load(f)
    output_md = ['''<details>''', '''<summary>Show updated benchmarks!</summary>''', ''' ''']
    for benchmark_name in sorted(results):
        benchmark_res = results[benchmark_name]
        benchmark_file_name = benchmark_name.split('/')[-1]
        output_md.append(f'### Benchmark: {benchmark_file_name}')
        title = '''| metric |'''
        lines = '''|--------|'''
        value = '''| new / old (diff) |'''
        for metric_name in sorted(benchmark_res):
            metric_vals = benchmark_res[metric_name]
            new_val = metric_vals['''new''']
            old_val = metric_vals.get('old', None)
            dif_val = metric_vals.get('diff', None)
            val_str = f' {new_val:f}' if isinstance(new_val, (int, float)) else '''None'''
            if old_val is not None:
                val_str += f' / {old_val:f}' if isinstance(old_val, (int, float)) else "None"
            if dif_val is not None:
                val_str += f' ({dif_val:f})' if isinstance(dif_val, (int, float)) else "None"
            title += " " + metric_name + " |"
            lines += "---|"
            value += val_str + " |"
        output_md += [title, lines, value, " "]
    output_md.append('</details>')
    with open(output_md_file, 'w', encoding='utf-8') as f:
        f.writelines('\n'.join(output_md))


if __name__ == "__main__":
    input_json_file = sys.argv[1]
    output_md_file = sys.argv[2]
    format_json_to_md(input_json_file, output_md_file)
| 220 |
"""simple docstring"""
from functools import lru_cache
@lru_cache
def factorial(num: int) -> int:
    if num < 0:
        raise ValueError('''Number should not be negative.''')
    return 1 if num in (0, 1) else num * factorial(num - 1)
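# examples: factorial(0) == 1, factorial(6) == 720; results are memoized by lru_cache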
if __name__ == "__main__":
import doctest
doctest.testmod()
| 264 | 0 |
from __future__ import annotations
END = '''#'''


class Trie:
    '''simple docstring'''

    def __init__(self) -> None:
        self._trie: dict = {}

    def insert_word(self, text: str) -> None:
        trie = self._trie
        for char in text:
            if char not in trie:
                trie[char] = {}
            trie = trie[char]
        trie[END] = True

    def find_word(self, prefix: str):
        trie = self._trie
        for char in prefix:
            if char in trie:
                trie = trie[char]
            else:
                return []
        return self._elements(trie)

    def _elements(self, d: dict) -> tuple:
        result = []
        for c, v in d.items():
            sub_result = [''' '''] if c == END else [(c + s) for s in self._elements(v)]
            result.extend(sub_result)
        return tuple(result)


trie = Trie()
words = ('''depart''', '''detergent''', '''daring''', '''dog''', '''deer''', '''deal''')
for word in words:
    trie.insert_word(word)


def autocomplete_using_trie(string: str) -> tuple:
    suffixes = trie.find_word(string)
    return tuple(string + word for word in suffixes)


def main() -> None:
    print(autocomplete_using_trie('''de'''))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    main()
| 262 |
"""simple docstring"""
import sys
N = (
'''73167176531330624919225119674426574742355349194934'''
'''96983520312774506326239578318016984801869478851843'''
'''85861560789112949495459501737958331952853208805511'''
'''12540698747158523863050715693290963295227443043557'''
'''66896648950445244523161731856403098711121722383113'''
'''62229893423380308135336276614282806444486645238749'''
'''30358907296290491560440772390713810515859307960866'''
'''70172427121883998797908792274921901699720888093776'''
'''65727333001053367881220235421809751254540594752243'''
'''52584907711670556013604839586446706324415722155397'''
'''53697817977846174064955149290862569321978468622482'''
'''83972241375657056057490261407972968652414535100474'''
'''82166370484403199890008895243450658541227588666881'''
'''16427171479924442928230863465674813919123162824586'''
'''17866458359124566529476545682848912883142607690042'''
'''24219022671055626321111109370544217506941658960408'''
'''07198403850962455444362981230987879927244284909188'''
'''84580156166097919133875499200524063689912560717606'''
'''05886116467109405077541002256983155200055935729725'''
'''71636269561882670428252483600823257530420752963450'''
)
def str_eval(s: str) -> int:
    """Returns the product of the digits in the given string."""
    product = 1
    for digit in s:
        product *= int(digit)
    return product


def solution(n: str = N) -> int:
    """Find the greatest product of thirteen adjacent digits in n."""
    largest_product = -sys.maxsize - 1
    substr = n[:13]
    cur_index = 13
    while cur_index < len(n) - 13:
        if int(n[cur_index]) >= int(substr[0]):
            substr = substr[1:] + n[cur_index]
            cur_index += 1
        else:
            largest_product = max(largest_product, str_eval(substr))
            substr = n[cur_index : cur_index + 13]
            cur_index += 13
    return largest_product
if __name__ == "__main__":
print(f'{solution() = }')
| 264 | 0 |