"""simple docstring"""
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_video_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import VivitImageProcessor
class __SCREAMING_SNAKE_CASE ( unittest.TestCase ):
'''simple docstring'''
def __init__( self : str, lowerCamelCase : Union[str, Any], lowerCamelCase : Optional[Any]=7, lowerCamelCase : str=3, lowerCamelCase : Any=10, lowerCamelCase : Dict=18, lowerCamelCase : int=30, lowerCamelCase : Union[str, Any]=400, lowerCamelCase : Optional[Any]=True, lowerCamelCase : List[Any]=None, lowerCamelCase : List[str]=True, lowerCamelCase : List[str]=[0.5, 0.5, 0.5], lowerCamelCase : int=[0.5, 0.5, 0.5], lowerCamelCase : Tuple=None, )-> Union[str, Any]:
lowerCamelCase__ : Union[str, Any] =size if size is not None else {'''shortest_edge''': 18}
lowerCamelCase__ : Union[str, Any] =crop_size if crop_size is not None else {'''height''': 18, '''width''': 18}
lowerCamelCase__ : Optional[Any] =parent
lowerCamelCase__ : Tuple =batch_size
lowerCamelCase__ : int =num_channels
lowerCamelCase__ : List[Any] =num_frames
lowerCamelCase__ : List[Any] =image_size
lowerCamelCase__ : List[str] =min_resolution
lowerCamelCase__ : Any =max_resolution
lowerCamelCase__ : Union[str, Any] =do_resize
lowerCamelCase__ : Union[str, Any] =size
lowerCamelCase__ : int =do_normalize
lowerCamelCase__ : int =image_mean
lowerCamelCase__ : Optional[int] =image_std
lowerCamelCase__ : int =crop_size
def snake_case ( self : Tuple )-> Tuple:
return {
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_normalize": self.do_normalize,
"do_resize": self.do_resize,
"size": self.size,
"crop_size": self.crop_size,
}
@require_torch
@require_vision
class __SCREAMING_SNAKE_CASE ( lowerCAmelCase_ , unittest.TestCase ):
'''simple docstring'''
_a = VivitImageProcessor if is_vision_available() else None
def snake_case ( self : Tuple )-> Tuple:
lowerCamelCase__ : str =VivitImageProcessingTester(self )
@property
def snake_case ( self : Any )-> Optional[int]:
return self.image_processor_tester.prepare_image_processor_dict()
def snake_case ( self : str )-> List[str]:
lowerCamelCase__ : List[str] =self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(lowerCamelCase, '''image_mean''' ) )
self.assertTrue(hasattr(lowerCamelCase, '''image_std''' ) )
self.assertTrue(hasattr(lowerCamelCase, '''do_normalize''' ) )
self.assertTrue(hasattr(lowerCamelCase, '''do_resize''' ) )
self.assertTrue(hasattr(lowerCamelCase, '''do_center_crop''' ) )
self.assertTrue(hasattr(lowerCamelCase, '''size''' ) )
def snake_case ( self : Union[str, Any] )-> Dict:
lowerCamelCase__ : Optional[Any] =self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size, {'''shortest_edge''': 18} )
self.assertEqual(image_processor.crop_size, {'''height''': 18, '''width''': 18} )
lowerCamelCase__ : List[Any] =self.image_processing_class.from_dict(self.image_processor_dict, size=42, crop_size=84 )
self.assertEqual(image_processor.size, {'''shortest_edge''': 42} )
self.assertEqual(image_processor.crop_size, {'''height''': 84, '''width''': 84} )
def snake_case ( self : str )-> int:
# Initialize image_processing
lowerCamelCase__ : Any =self.image_processing_class(**self.image_processor_dict )
# create random PIL videos
lowerCamelCase__ : Tuple =prepare_video_inputs(self.image_processor_tester, equal_resolution=lowerCamelCase )
for video in video_inputs:
self.assertIsInstance(lowerCamelCase, lowerCamelCase )
self.assertIsInstance(video[0], Image.Image )
# Test not batched input
lowerCamelCase__ : Optional[Any] =image_processing(video_inputs[0], return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_videos.shape, (
1,
self.image_processor_tester.num_frames,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
), )
# Test batched
lowerCamelCase__ : Tuple =image_processing(lowerCamelCase, return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_videos.shape, (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_frames,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
), )
def snake_case ( self : str )-> Dict:
# Initialize image_processing
lowerCamelCase__ : List[Any] =self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
lowerCamelCase__ : Union[str, Any] =prepare_video_inputs(self.image_processor_tester, equal_resolution=lowerCamelCase, numpify=lowerCamelCase )
for video in video_inputs:
self.assertIsInstance(lowerCamelCase, lowerCamelCase )
self.assertIsInstance(video[0], np.ndarray )
# Test not batched input
lowerCamelCase__ : Tuple =image_processing(video_inputs[0], return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_videos.shape, (
1,
self.image_processor_tester.num_frames,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
), )
# Test batched
lowerCamelCase__ : str =image_processing(lowerCamelCase, return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_videos.shape, (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_frames,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
), )
def snake_case ( self : Tuple )-> Dict:
# Initialize image_processing
lowerCamelCase__ : Optional[int] =self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
lowerCamelCase__ : Any =prepare_video_inputs(self.image_processor_tester, equal_resolution=lowerCamelCase, torchify=lowerCamelCase )
for video in video_inputs:
self.assertIsInstance(lowerCamelCase, lowerCamelCase )
self.assertIsInstance(video[0], torch.Tensor )
# Test not batched input
lowerCamelCase__ : Any =image_processing(video_inputs[0], return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_videos.shape, (
1,
self.image_processor_tester.num_frames,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
), )
# Test batched
lowerCamelCase__ : List[Any] =image_processing(lowerCamelCase, return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_videos.shape, (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_frames,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
), )
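# --- Usage sketch (not part of the test file above) ---
# A minimal, hedged example of running VivitImageProcessor on a dummy video.
# The checkpoint name "google/vivit-b-16x2-kinetics400" and the expected
# output shape are assumptions of this sketch, not taken from the tests; the
# shape should mirror what the tests assert: (batch, frames, channels, H, W).
if __name__ == "__main__":
    import numpy as np
    from transformers import VivitImageProcessor

    processor = VivitImageProcessor.from_pretrained("google/vivit-b-16x2-kinetics400")
    # One video = a list of frames (H, W, C); values and frame count are arbitrary.
    video = [np.random.randint(0, 255, (224, 224, 3), dtype=np.uint8) for _ in range(32)]
    inputs = processor(video, return_tensors="pt")
    print(inputs.pixel_values.shape)  # expected roughly torch.Size([1, 32, 3, 224, 224])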
"""simple docstring"""
import argparse
import torch
from transformers import GPTaConfig, GPTaModel, load_tf_weights_in_gpta
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging
logging.set_verbosity_info()
def snake_case__ ( __lowerCamelCase : List[Any] , __lowerCamelCase : List[str] , __lowerCamelCase : List[Any] ):
"""simple docstring"""
# Construct model
if gpta_config_file == "":
lowerCamelCase__ : Dict =GPTaConfig()
else:
lowerCamelCase__ : Tuple =GPTaConfig.from_json_file(__lowerCamelCase )
lowerCamelCase__ : Optional[Any] =GPTaModel(__lowerCamelCase )
# Load weights from numpy
load_tf_weights_in_gpta(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase )
# Save pytorch-model
lowerCamelCase__ : List[str] =pytorch_dump_folder_path + '''/''' + WEIGHTS_NAME
lowerCamelCase__ : int =pytorch_dump_folder_path + '''/''' + CONFIG_NAME
print(f'''Save PyTorch model to {pytorch_weights_dump_path}''' )
torch.save(model.state_dict() , __lowerCamelCase )
print(f'''Save configuration file to {pytorch_config_dump_path}''' )
with open(__lowerCamelCase , '''w''' , encoding='''utf-8''' ) as f:
f.write(config.to_json_string() )
if __name__ == "__main__":
_lowercase : Optional[Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--gpt2_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
parser.add_argument(
"--gpt2_config_file",
default="",
type=str,
help=(
"An optional config json file corresponding to the pre-trained OpenAI model. \n"
"This specifies the model architecture."
),
)
_lowercase : Any = parser.parse_args()
convert_gpta_checkpoint_to_pytorch(args.gpta_checkpoint_path, args.gpta_config_file, args.pytorch_dump_folder_path)
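# Example invocation, built directly from the argparse flags above. The file
# name and paths are hypothetical; adjust them to your checkpoint layout:
#   python convert_gpt2_checkpoint.py \
#       --gpt2_checkpoint_path /path/to/gpt2/model.ckpt \
#       --pytorch_dump_folder_path /path/to/output \
#       --gpt2_config_file /path/to/gpt2/config.json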
def is_arithmetic_series(series: list) -> bool:
    """Checks whether a series is arithmetic (constant difference between consecutive terms)."""
    if not isinstance(series, list):
        raise ValueError("Input series is not valid, valid series - [2, 4, 6]")
    if len(series) == 0:
        raise ValueError("Input list must be a non empty list")
    if len(series) == 1:
        return True
    common_diff = series[1] - series[0]
    for index in range(len(series) - 1):
        if series[index + 1] - series[index] != common_diff:
            return False
    return True


def arithmetic_mean(series: list) -> float:
    """Returns the arithmetic mean of the series."""
    if not isinstance(series, list):
        raise ValueError("Input series is not valid, valid series - [2, 4, 6]")
    if len(series) == 0:
        raise ValueError("Input list must be a non empty list")
    answer = 0
    for val in series:
        answer += val
    return answer / len(series)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
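# A quick usage sketch of the two helpers above (the readable names come from
# this rewrite; the obfuscated original did not fix them):
#   >>> is_arithmetic_series([2, 4, 6])
#   True
#   >>> is_arithmetic_series([2, 4, 7])   # difference changes from 2 to 3
#   False
#   >>> arithmetic_mean([2, 4, 6])
#   4.0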
"""
Project Euler 493 style problem: an urn holds 70 balls, 10 of each of 7
colours. Picking 20 balls uniformly at random, find the expected number of
distinct colours drawn. By linearity of expectation this is
NUM_COLOURS * P(a given colour appears at least once).
"""
import math

BALLS_PER_COLOUR = 10
NUM_COLOURS = 7
NUM_BALLS = BALLS_PER_COLOUR * NUM_COLOURS


def solution(num_picked: int = 20) -> str:
    """Returns the expected number of distinct colours, to nine decimal places."""
    total = math.comb(NUM_BALLS, num_picked)
    missing_colour = math.comb(NUM_BALLS - BALLS_PER_COLOUR, num_picked)
    result = NUM_COLOURS * (1 - missing_colour / total)
    return f"{result:.9f}"


if __name__ == "__main__":
    print(solution(20))
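# A hedged Monte Carlo sanity check for the closed-form answer above; the
# sampling approach, helper name, and trial count are illustrative choices,
# not part of the original solution.
import random


def monte_carlo_estimate(trials: int = 100_000) -> float:
    # 70 balls labelled by colour, 10 of each of the 7 colours.
    balls = [colour for colour in range(NUM_COLOURS) for _ in range(BALLS_PER_COLOUR)]
    seen = 0
    for _ in range(trials):
        seen += len(set(random.sample(balls, 20)))  # distinct colours this draw
    return seen / trials


# monte_carlo_estimate() should land near float(solution(20)) ~= 6.818741802.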
"""
Project Euler 99: each line of base_exp.txt holds a base/exponent pair; find
the 1-indexed line with the greatest value of base**exponent, comparing via
logarithms instead of computing the huge powers directly.
"""
import os
from math import log10


def solution(data_file: str = "base_exp.txt") -> int:
    largest = 0.0
    result = 0
    for i, line in enumerate(open(os.path.join(os.path.dirname(__file__), data_file))):
        a, x = list(map(int, line.split(",")))
        if x * log10(a) > largest:
            largest = x * log10(a)
            result = i + 1
    return result


if __name__ == "__main__":
    print(solution())
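# base_exp.txt is expected to hold one "base,exponent" pair per line, e.g.
# (illustrative values, not necessarily the real Project Euler data):
#   519432,525806
#   632382,518061
# The logarithm trick works because base1**exp1 > base2**exp2 exactly when
# exp1*log10(base1) > exp2*log10(base2), and the latter stays a small float.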
from __future__ import annotations

from statistics import mean


def calculate_waitingtime(arrival_time: list[int], burst_time: list[int], no_of_processes: int) -> list[int]:
    """Calculate the waiting time of each process under shortest-job-first scheduling."""
    waiting_time = [0] * no_of_processes
    remaining_time = [0] * no_of_processes

    # Initialize remaining_time from burst_time.
    for i in range(no_of_processes):
        remaining_time[i] = burst_time[i]

    ready_process: list[int] = []
    completed = 0
    total_time = 0

    # While processes are not completed: a process whose arrival time has
    # passed and which has remaining execution time is put into
    # ready_process, and the shortest process in ready_process
    # (target_process) is executed to completion.
    while completed != no_of_processes:
        ready_process = []
        target_process = -1

        for i in range(no_of_processes):
            if (arrival_time[i] <= total_time) and (remaining_time[i] > 0):
                ready_process.append(i)

        if len(ready_process) > 0:
            target_process = ready_process[0]
            for i in ready_process:
                if remaining_time[i] < remaining_time[target_process]:
                    target_process = i
            total_time += burst_time[target_process]
            completed += 1
            remaining_time[target_process] = 0
            waiting_time[target_process] = (
                total_time - arrival_time[target_process] - burst_time[target_process]
            )
        else:
            total_time += 1

    return waiting_time


def calculate_turnaroundtime(burst_time: list[int], no_of_processes: int, waiting_time: list[int]) -> list[int]:
    """Turnaround time of each process = burst time + waiting time."""
    turn_around_time = [0] * no_of_processes
    for i in range(no_of_processes):
        turn_around_time[i] = burst_time[i] + waiting_time[i]
    return turn_around_time


if __name__ == "__main__":
    print("[TEST CASE 01]")

    no_of_processes = 4
    burst_time = [2, 5, 3, 7]
    arrival_time = [0, 0, 0, 0]
    waiting_time = calculate_waitingtime(arrival_time, burst_time, no_of_processes)
    turn_around_time = calculate_turnaroundtime(burst_time, no_of_processes, waiting_time)

    # Printing the Result
    print("PID\tBurst Time\tArrival Time\tWaiting Time\tTurnaround Time")
    for i, process_id in enumerate(list(range(1, 5))):
        print(
            f"{process_id}\t{burst_time[i]}\t\t\t{arrival_time[i]}\t\t\t\t"
            f"{waiting_time[i]}\t\t\t\t{turn_around_time[i]}"
        )
    print(f"\nAverage waiting time = {mean(waiting_time):.5f}")
    print(f"Average turnaround time = {mean(turn_around_time):.5f}")
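# A hedged second example (values are illustrative, not from the test above):
# with staggered arrivals the scheduler idles (total_time += 1) until a
# process arrives, then always runs the shortest ready job to completion.
if __name__ == "__main__":
    print("[TEST CASE 02]")
    # arrival [0, 2, 4], burst [8, 4, 1]: P1 runs 0-8 (only one ready), then
    # P3 (shortest ready) runs 8-9, then P2 runs 9-13 -> waiting [0, 7, 4].
    print(calculate_waitingtime([0, 2, 4], [8, 4, 1], 3))  # [0, 7, 4]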
"""Counting sort: a stable, non-comparison sort for integer collections."""


def counting_sort(collection: list) -> list:
    # if the collection is empty, returns empty
    if collection == []:
        return []

    # get some information about the collection
    coll_len = len(collection)
    coll_max = max(collection)
    coll_min = min(collection)

    # create the counting array
    counting_arr_length = coll_max + 1 - coll_min
    counting_arr = [0] * counting_arr_length

    # count how often each number appears in the collection
    for number in collection:
        counting_arr[number - coll_min] += 1

    # sum each position with its predecessors. now, counting_arr[i] tells
    # us how many elements <= i are in the collection
    for i in range(1, counting_arr_length):
        counting_arr[i] = counting_arr[i] + counting_arr[i - 1]

    # create the output collection
    ordered = [0] * coll_len

    # place the elements in the output, respecting the original order (stable
    # sort) from end to begin, updating counting_arr
    for i in reversed(range(0, coll_len)):
        ordered[counting_arr[collection[i] - coll_min] - 1] = collection[i]
        counting_arr[collection[i] - coll_min] -= 1

    return ordered


def counting_sort_string(string: str) -> str:
    """Sorts the characters of a string via counting sort on their code points."""
    return "".join([chr(i) for i in counting_sort([ord(c) for c in string])])


if __name__ == "__main__":
    # Test string sort
    assert counting_sort_string("thisisthestring") == "eghhiiinrsssttt"

    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
    print(counting_sort(unsorted))
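# Usage sketch: counting sort runs in O(n + k) time, where k is the value
# range (coll_max - coll_min + 1), so it suits dense integer ranges. Negative
# values work because indices are offset by coll_min. Illustrative examples:
#   >>> counting_sort([0, 5, 3, 2, 2])
#   [0, 2, 2, 3, 5]
#   >>> counting_sort([-2, -5, -45])
#   [-45, -5, -2]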
"""Tokenization classes for BigBird."""
import os
import re
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple

import sentencepiece as spm

from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging


logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "spiece.model"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "google/bigbird-roberta-base": "https://huggingface.co/google/bigbird-roberta-base/resolve/main/spiece.model",
        "google/bigbird-roberta-large": (
            "https://huggingface.co/google/bigbird-roberta-large/resolve/main/spiece.model"
        ),
        "google/bigbird-base-trivia-itc": (
            "https://huggingface.co/google/bigbird-base-trivia-itc/resolve/main/spiece.model"
        ),
    }
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "google/bigbird-roberta-base": 4096,
    "google/bigbird-roberta-large": 4096,
    "google/bigbird-base-trivia-itc": 4096,
}


class BigBirdTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    prefix_tokens: List[int] = []

    def __init__(
        self,
        vocab_file,
        unk_token="<unk>",
        bos_token="<s>",
        eos_token="</s>",
        pad_token="<pad>",
        sep_token="[SEP]",
        mask_token="[MASK]",
        cls_token="[CLS]",
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        **kwargs,
    ):
        bos_token = AddedToken(bos_token, lstrip=False, rstrip=False) if isinstance(bos_token, str) else bos_token
        eos_token = AddedToken(eos_token, lstrip=False, rstrip=False) if isinstance(eos_token, str) else eos_token
        unk_token = AddedToken(unk_token, lstrip=False, rstrip=False) if isinstance(unk_token, str) else unk_token
        pad_token = AddedToken(pad_token, lstrip=False, rstrip=False) if isinstance(pad_token, str) else pad_token
        cls_token = AddedToken(cls_token, lstrip=False, rstrip=False) if isinstance(cls_token, str) else cls_token
        sep_token = AddedToken(sep_token, lstrip=False, rstrip=False) if isinstance(sep_token, str) else sep_token

        # Mask token behaves like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs

        super().__init__(
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            pad_token=pad_token,
            sep_token=sep_token,
            mask_token=mask_token,
            cls_token=cls_token,
            sp_model_kwargs=self.sp_model_kwargs,
            **kwargs,
        )

        self.vocab_file = vocab_file

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(vocab_file)

    @property
    def vocab_size(self):
        return self.sp_model.get_piece_size()

    def get_vocab(self):
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def __getstate__(self):
        state = self.__dict__.copy()
        # the SentencePiece processor is not picklable; it is reloaded in __setstate__
        state["sp_model"] = None
        return state

    def __setstate__(self, d):
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)

    def _tokenize(self, text: str) -> List[str]:
        """Takes a string and returns a list of (sub-word) token strings."""
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token):
        """Converts a token (str) to an id using the vocab."""
        return self.sp_model.piece_to_id(token)

    def _convert_id_to_token(self, index):
        """Converts an index (integer) to a token (str) using the vocab."""
        token = self.sp_model.IdToPiece(index)
        return token

    def convert_tokens_to_string(self, tokens):
        """Converts a sequence of tokens (strings) into a single string."""
        current_sub_tokens = []
        out_string = ""
        prev_is_special = False
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                if not prev_is_special:
                    out_string += " "
                out_string += self.sp_model.decode(current_sub_tokens) + token
                prev_is_special = True
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token)
                prev_is_special = False
        out_string += self.sp_model.decode(current_sub_tokens)
        return out_string.strip()

    def _decode(
        self,
        token_ids: List[int],
        skip_special_tokens: bool = False,
        clean_up_tokenization_spaces: bool = None,
        spaces_between_special_tokens: bool = True,
        **kwargs,
    ) -> str:
        self._decode_use_source_tokenizer = kwargs.pop("use_source_tokenizer", False)

        filtered_tokens = self.convert_ids_to_tokens(token_ids, skip_special_tokens=skip_special_tokens)

        # To avoid mixing byte-level and unicode for byte-level BPT
        # we need to build string separately for added tokens and byte-level tokens
        # cf. https://github.com/huggingface/transformers/issues/1133
        sub_texts = []
        current_sub_text = []
        for token in filtered_tokens:
            if skip_special_tokens and token in self.all_special_ids:
                continue
            if token in self.added_tokens_encoder:
                if current_sub_text:
                    sub_texts.append(self.convert_tokens_to_string(current_sub_text))
                    current_sub_text = []
                sub_texts.append(token)
            else:
                current_sub_text.append(token)
        if current_sub_text:
            sub_texts.append(self.convert_tokens_to_string(current_sub_text))

        # Mimic the behavior of the Rust tokenizer:
        # No space before [MASK] and [SEP]
        if spaces_between_special_tokens:
            text = re.sub(r" (\[(MASK|SEP)\])", r"\1", " ".join(sub_texts))
        else:
            text = "".join(sub_texts)

        clean_up_tokenization_spaces = (
            clean_up_tokenization_spaces
            if clean_up_tokenization_spaces is not None
            else self.clean_up_tokenization_spaces
        )
        if clean_up_tokenization_spaces:
            clean_text = self.clean_up_tokenization(text)
            return clean_text
        else:
            return text

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        return (out_vocab_file,)

    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + token_ids_1 + sep

    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )

        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1]

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]
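# --- Usage sketch (not part of the tokenizer module) ---
# A hedged round-trip through the slow BigBird tokenizer, assuming access to
# the google/bigbird-roberta-base checkpoint referenced in the vocab map
# above; the sample sentence is arbitrary.
if __name__ == "__main__":
    from transformers import BigBirdTokenizer

    tokenizer = BigBirdTokenizer.from_pretrained("google/bigbird-roberta-base")
    ids = tokenizer("BigBird handles long sequences.")["input_ids"]
    # The decoded text should include the [CLS] ... [SEP] special tokens,
    # with no space before [SEP] (see the regex in _decode).
    print(tokenizer.decode(ids))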
"""Integration tests for the PyTorch `*_no_trainer.py` example scripts."""
import argparse
import json
import logging
import os
import shutil
import sys
import tempfile
import unittest
from unittest import mock

import torch

from accelerate.utils import write_basic_config
from transformers.testing_utils import TestCasePlus, get_gpu_count, run_command, slow, torch_device
from transformers.utils import is_apex_available


logging.basicConfig(level=logging.DEBUG)

logger = logging.getLogger()


def get_setup_file():
    parser = argparse.ArgumentParser()
    parser.add_argument("-f")
    args = parser.parse_args()
    return args.f


def get_results(output_dir):
    results = {}
    path = os.path.join(output_dir, "all_results.json")
    if os.path.exists(path):
        with open(path, "r") as f:
            results = json.load(f)
    else:
        raise ValueError(f"can't find {path}")
    return results


def is_cuda_and_apex_available():
    is_using_cuda = torch.cuda.is_available() and torch_device == "cuda"
    return is_using_cuda and is_apex_available()


stream_handler = logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)


class ExamplesTestsNoTrainer(TestCasePlus):
    @classmethod
    def setUpClass(cls):
        # Write Accelerate config, will pick up on CPU, GPU, and multi-GPU
        cls.tmpdir = tempfile.mkdtemp()
        cls.configPath = os.path.join(cls.tmpdir, "default_config.yml")
        write_basic_config(save_location=cls.configPath)
        cls._launch_args = ["accelerate", "launch", "--config_file", cls.configPath]

    @classmethod
    def tearDownClass(cls):
        shutil.rmtree(cls.tmpdir)

    @mock.patch.dict(os.environ, {"WANDB_MODE": "offline"})
    def test_run_glue_no_trainer(self):
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"""
            {self.examples_dir}/pytorch/text-classification/run_glue_no_trainer.py
            --model_name_or_path distilbert-base-uncased
            --output_dir {tmp_dir}
            --train_file ./tests/fixtures/tests_samples/MRPC/train.csv
            --validation_file ./tests/fixtures/tests_samples/MRPC/dev.csv
            --per_device_train_batch_size=2
            --per_device_eval_batch_size=1
            --learning_rate=1e-4
            --seed=42
            --checkpointing_steps epoch
            --with_tracking
        """.split()

        if is_cuda_and_apex_available():
            testargs.append("--fp16")

        run_command(self._launch_args + testargs)
        result = get_results(tmp_dir)
        self.assertGreaterEqual(result["eval_accuracy"], 0.75)
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, "epoch_0")))
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, "glue_no_trainer")))

    @mock.patch.dict(os.environ, {"WANDB_MODE": "offline"})
    def test_run_clm_no_trainer(self):
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"""
            {self.examples_dir}/pytorch/language-modeling/run_clm_no_trainer.py
            --model_name_or_path distilgpt2
            --train_file ./tests/fixtures/sample_text.txt
            --validation_file ./tests/fixtures/sample_text.txt
            --block_size 128
            --per_device_train_batch_size 5
            --per_device_eval_batch_size 5
            --num_train_epochs 2
            --output_dir {tmp_dir}
            --checkpointing_steps epoch
            --with_tracking
        """.split()

        if torch.cuda.device_count() > 1:
            # Skipping because there are not enough batches to train the model + would need a drop_last to work.
            return

        run_command(self._launch_args + testargs)
        result = get_results(tmp_dir)
        self.assertLess(result["perplexity"], 100)
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, "epoch_0")))
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, "clm_no_trainer")))

    @mock.patch.dict(os.environ, {"WANDB_MODE": "offline"})
    def test_run_mlm_no_trainer(self):
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"""
            {self.examples_dir}/pytorch/language-modeling/run_mlm_no_trainer.py
            --model_name_or_path distilroberta-base
            --train_file ./tests/fixtures/sample_text.txt
            --validation_file ./tests/fixtures/sample_text.txt
            --output_dir {tmp_dir}
            --num_train_epochs=1
            --checkpointing_steps epoch
            --with_tracking
        """.split()

        run_command(self._launch_args + testargs)
        result = get_results(tmp_dir)
        self.assertLess(result["perplexity"], 42)
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, "epoch_0")))
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, "mlm_no_trainer")))

    @mock.patch.dict(os.environ, {"WANDB_MODE": "offline"})
    def test_run_ner_no_trainer(self):
        # with so little data distributed training needs more epochs to get the score on par with 0/1 gpu
        epochs = 7 if get_gpu_count() > 1 else 2

        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"""
            {self.examples_dir}/pytorch/token-classification/run_ner_no_trainer.py
            --model_name_or_path bert-base-uncased
            --train_file tests/fixtures/tests_samples/conll/sample.json
            --validation_file tests/fixtures/tests_samples/conll/sample.json
            --output_dir {tmp_dir}
            --learning_rate=2e-4
            --per_device_train_batch_size=2
            --per_device_eval_batch_size=2
            --num_train_epochs={epochs}
            --seed 7
            --checkpointing_steps epoch
            --with_tracking
        """.split()

        run_command(self._launch_args + testargs)
        result = get_results(tmp_dir)
        self.assertGreaterEqual(result["eval_accuracy"], 0.75)
        self.assertLess(result["train_loss"], 0.5)
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, "epoch_0")))
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, "ner_no_trainer")))

    @unittest.skip(reason="Fix me @muellerzr")
    @mock.patch.dict(os.environ, {"WANDB_MODE": "offline"})
    def test_run_squad_no_trainer(self):
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"""
            {self.examples_dir}/pytorch/question-answering/run_qa_no_trainer.py
            --model_name_or_path bert-base-uncased
            --version_2_with_negative
            --train_file tests/fixtures/tests_samples/SQUAD/sample.json
            --validation_file tests/fixtures/tests_samples/SQUAD/sample.json
            --output_dir {tmp_dir}
            --seed=42
            --max_train_steps=10
            --num_warmup_steps=2
            --learning_rate=2e-4
            --per_device_train_batch_size=2
            --per_device_eval_batch_size=1
            --checkpointing_steps epoch
            --with_tracking
        """.split()

        run_command(self._launch_args + testargs)
        result = get_results(tmp_dir)
        # Because we use --version_2_with_negative the testing script uses SQuAD v2 metrics.
        self.assertGreaterEqual(result["eval_f1"], 28)
        self.assertGreaterEqual(result["eval_exact"], 28)
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, "epoch_0")))
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, "qa_no_trainer")))

    @mock.patch.dict(os.environ, {"WANDB_MODE": "offline"})
    def test_run_swag_no_trainer(self):
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"""
            {self.examples_dir}/pytorch/multiple-choice/run_swag_no_trainer.py
            --model_name_or_path bert-base-uncased
            --train_file tests/fixtures/tests_samples/swag/sample.json
            --validation_file tests/fixtures/tests_samples/swag/sample.json
            --output_dir {tmp_dir}
            --max_train_steps=20
            --num_warmup_steps=2
            --learning_rate=2e-4
            --per_device_train_batch_size=2
            --per_device_eval_batch_size=1
            --with_tracking
        """.split()

        run_command(self._launch_args + testargs)
        result = get_results(tmp_dir)
        self.assertGreaterEqual(result["eval_accuracy"], 0.8)
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, "swag_no_trainer")))

    @slow
    @mock.patch.dict(os.environ, {"WANDB_MODE": "offline"})
    def test_run_summarization_no_trainer(self):
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"""
            {self.examples_dir}/pytorch/summarization/run_summarization_no_trainer.py
            --model_name_or_path t5-small
            --train_file tests/fixtures/tests_samples/xsum/sample.json
            --validation_file tests/fixtures/tests_samples/xsum/sample.json
            --output_dir {tmp_dir}
            --max_train_steps=50
            --num_warmup_steps=8
            --learning_rate=2e-4
            --per_device_train_batch_size=2
            --per_device_eval_batch_size=1
            --checkpointing_steps epoch
            --with_tracking
        """.split()

        run_command(self._launch_args + testargs)
        result = get_results(tmp_dir)
        self.assertGreaterEqual(result["eval_rouge1"], 10)
        self.assertGreaterEqual(result["eval_rouge2"], 2)
        self.assertGreaterEqual(result["eval_rougeL"], 7)
        self.assertGreaterEqual(result["eval_rougeLsum"], 7)
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, "epoch_0")))
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, "summarization_no_trainer")))

    @slow
    @mock.patch.dict(os.environ, {"WANDB_MODE": "offline"})
    def test_run_translation_no_trainer(self):
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"""
            {self.examples_dir}/pytorch/translation/run_translation_no_trainer.py
            --model_name_or_path sshleifer/student_marian_en_ro_6_1
            --source_lang en
            --target_lang ro
            --train_file tests/fixtures/tests_samples/wmt16/sample.json
            --validation_file tests/fixtures/tests_samples/wmt16/sample.json
            --output_dir {tmp_dir}
            --max_train_steps=50
            --num_warmup_steps=8
            --num_beams=6
            --learning_rate=3e-3
            --per_device_train_batch_size=2
            --per_device_eval_batch_size=1
            --source_lang en_XX
            --target_lang ro_RO
            --checkpointing_steps epoch
            --with_tracking
        """.split()

        run_command(self._launch_args + testargs)
        result = get_results(tmp_dir)
        self.assertGreaterEqual(result["eval_bleu"], 30)
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, "epoch_0")))
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, "translation_no_trainer")))

    @slow
    def test_run_semantic_segmentation_no_trainer(self):
        stream_handler = logging.StreamHandler(sys.stdout)
        logger.addHandler(stream_handler)

        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"""
            {self.examples_dir}/pytorch/semantic-segmentation/run_semantic_segmentation_no_trainer.py
            --dataset_name huggingface/semantic-segmentation-test-sample
            --output_dir {tmp_dir}
            --max_train_steps=10
            --num_warmup_steps=2
            --learning_rate=2e-4
            --per_device_train_batch_size=2
            --per_device_eval_batch_size=1
            --checkpointing_steps epoch
        """.split()

        run_command(self._launch_args + testargs)
        result = get_results(tmp_dir)
        self.assertGreaterEqual(result["eval_overall_accuracy"], 0.10)

    @mock.patch.dict(os.environ, {"WANDB_MODE": "offline"})
    def test_run_image_classification_no_trainer(self):
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"""
            {self.examples_dir}/pytorch/image-classification/run_image_classification_no_trainer.py
            --model_name_or_path google/vit-base-patch16-224-in21k
            --dataset_name hf-internal-testing/cats_vs_dogs_sample
            --learning_rate 1e-4
            --per_device_train_batch_size 2
            --per_device_eval_batch_size 1
            --max_train_steps 2
            --train_val_split 0.1
            --seed 42
            --output_dir {tmp_dir}
            --with_tracking
            --checkpointing_steps 1
        """.split()

        if is_cuda_and_apex_available():
            testargs.append("--fp16")

        run_command(self._launch_args + testargs)
        result = get_results(tmp_dir)
        # The base model scores a 25%
        self.assertGreaterEqual(result["eval_accuracy"], 0.6)
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, "step_1")))
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, "image_classification_no_trainer")))
"""Integration tests for the Flax example scripts."""
import argparse
import json
import logging
import os
import sys
from unittest.mock import patch

from transformers.testing_utils import TestCasePlus, get_gpu_count, slow


SRC_DIRS = [
    os.path.join(os.path.dirname(__file__), dirname)
    for dirname in [
        "text-classification",
        "language-modeling",
        "summarization",
        "token-classification",
        "question-answering",
    ]
]
sys.path.extend(SRC_DIRS)


if SRC_DIRS is not None:
    import run_clm_flax
    import run_flax_glue
    import run_flax_ner
    import run_mlm_flax
    import run_qa
    import run_summarization_flax
    import run_t5_mlm_flax


logging.basicConfig(level=logging.DEBUG)

logger = logging.getLogger()


def get_setup_file():
    parser = argparse.ArgumentParser()
    parser.add_argument("-f")
    args = parser.parse_args()
    return args.f


def get_results(output_dir, split="eval"):
    path = os.path.join(output_dir, f"{split}_results.json")
    if os.path.exists(path):
        with open(path, "r") as f:
            return json.load(f)
    raise ValueError(f"can't find {path}")


stream_handler = logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)


class ExamplesTests(TestCasePlus):
    def test_run_glue(self):
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"""
            run_glue.py
            --model_name_or_path distilbert-base-uncased
            --output_dir {tmp_dir}
            --train_file ./tests/fixtures/tests_samples/MRPC/train.csv
            --validation_file ./tests/fixtures/tests_samples/MRPC/dev.csv
            --per_device_train_batch_size=2
            --per_device_eval_batch_size=1
            --learning_rate=1e-4
            --eval_steps=2
            --warmup_steps=2
            --seed=42
            --max_seq_length=128
        """.split()

        with patch.object(sys, "argv", testargs):
            run_flax_glue.main()
            result = get_results(tmp_dir)
            self.assertGreaterEqual(result["eval_accuracy"], 0.75)

    @slow
    def test_run_clm(self):
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"""
            run_clm_flax.py
            --model_name_or_path distilgpt2
            --train_file ./tests/fixtures/sample_text.txt
            --validation_file ./tests/fixtures/sample_text.txt
            --do_train
            --do_eval
            --block_size 128
            --per_device_train_batch_size 4
            --per_device_eval_batch_size 4
            --num_train_epochs 2
            --logging_steps 2 --eval_steps 2
            --output_dir {tmp_dir}
            --overwrite_output_dir
        """.split()

        with patch.object(sys, "argv", testargs):
            run_clm_flax.main()
            result = get_results(tmp_dir)
            self.assertLess(result["eval_perplexity"], 100)

    @slow
    def test_run_summarization(self):
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"""
            run_summarization.py
            --model_name_or_path t5-small
            --train_file tests/fixtures/tests_samples/xsum/sample.json
            --validation_file tests/fixtures/tests_samples/xsum/sample.json
            --test_file tests/fixtures/tests_samples/xsum/sample.json
            --output_dir {tmp_dir}
            --overwrite_output_dir
            --num_train_epochs=3
            --warmup_steps=8
            --do_train
            --do_eval
            --do_predict
            --learning_rate=2e-4
            --per_device_train_batch_size=2
            --per_device_eval_batch_size=1
            --predict_with_generate
        """.split()

        with patch.object(sys, "argv", testargs):
            run_summarization_flax.main()
            result = get_results(tmp_dir, split="test")
            self.assertGreaterEqual(result["test_rouge1"], 10)
            self.assertGreaterEqual(result["test_rouge2"], 2)
            self.assertGreaterEqual(result["test_rougeL"], 7)
            self.assertGreaterEqual(result["test_rougeLsum"], 7)

    @slow
    def test_run_mlm(self):
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"""
            run_mlm.py
            --model_name_or_path distilroberta-base
            --train_file ./tests/fixtures/sample_text.txt
            --validation_file ./tests/fixtures/sample_text.txt
            --output_dir {tmp_dir}
            --overwrite_output_dir
            --max_seq_length 128
            --per_device_train_batch_size 4
            --per_device_eval_batch_size 4
            --logging_steps 2 --eval_steps 2
            --do_train
            --do_eval
            --num_train_epochs=1
        """.split()

        with patch.object(sys, "argv", testargs):
            run_mlm_flax.main()
            result = get_results(tmp_dir)
            self.assertLess(result["eval_perplexity"], 42)

    @slow
    def test_run_t5_mlm(self):
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"""
            run_t5_mlm_flax.py
            --model_name_or_path t5-small
            --train_file ./tests/fixtures/sample_text.txt
            --validation_file ./tests/fixtures/sample_text.txt
            --do_train
            --do_eval
            --max_seq_length 128
            --per_device_train_batch_size 4
            --per_device_eval_batch_size 4
            --num_train_epochs 2
            --logging_steps 2 --eval_steps 2
            --output_dir {tmp_dir}
            --overwrite_output_dir
        """.split()

        with patch.object(sys, "argv", testargs):
            run_t5_mlm_flax.main()
            result = get_results(tmp_dir)
            self.assertGreaterEqual(result["eval_accuracy"], 0.42)

    @slow
    def test_run_ner(self):
        # with so little data distributed training needs more epochs to get the score on par with 0/1 gpu
        epochs = 7 if get_gpu_count() > 1 else 2

        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"""
            run_flax_ner.py
            --model_name_or_path bert-base-uncased
            --train_file tests/fixtures/tests_samples/conll/sample.json
            --validation_file tests/fixtures/tests_samples/conll/sample.json
            --output_dir {tmp_dir}
            --overwrite_output_dir
            --do_train
            --do_eval
            --warmup_steps=2
            --learning_rate=2e-4
            --logging_steps 2 --eval_steps 2
            --per_device_train_batch_size=2
            --per_device_eval_batch_size=2
            --num_train_epochs={epochs}
            --seed 7
        """.split()

        with patch.object(sys, "argv", testargs):
            run_flax_ner.main()
            result = get_results(tmp_dir)
            self.assertGreaterEqual(result["eval_accuracy"], 0.75)
            self.assertGreaterEqual(result["eval_f1"], 0.3)

    @slow
    def test_run_qa(self):
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"""
            run_qa.py
            --model_name_or_path bert-base-uncased
            --version_2_with_negative
            --train_file tests/fixtures/tests_samples/SQUAD/sample.json
            --validation_file tests/fixtures/tests_samples/SQUAD/sample.json
            --output_dir {tmp_dir}
            --overwrite_output_dir
            --num_train_epochs=3
            --warmup_steps=2
            --do_train
            --do_eval
            --logging_steps 2 --eval_steps 2
            --learning_rate=2e-4
            --per_device_train_batch_size=2
            --per_device_eval_batch_size=1
        """.split()

        with patch.object(sys, "argv", testargs):
            run_qa.main()
            result = get_results(tmp_dir)
            self.assertGreaterEqual(result["eval_f1"], 30)
            self.assertGreaterEqual(result["eval_exact"], 30)
import warnings

from ...utils import logging
from .image_processing_flava import FlavaImageProcessor


logger = logging.get_logger(__name__)


class FlavaFeatureExtractor(FlavaImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class FlavaFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
            " use FlavaImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
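# Hedged migration sketch: code that previously instantiated the deprecated
# FlavaFeatureExtractor can switch to FlavaImageProcessor with the same
# arguments; the checkpoint name below is an assumption of this example.
#   from transformers import FlavaImageProcessor
#   image_processor = FlavaImageProcessor.from_pretrained("facebook/flava-full")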
"""simple docstring"""
import json
import os
import unittest
from transformers.models.gptsan_japanese.tokenization_gptsan_japanese import (
VOCAB_FILES_NAMES,
GPTSanJapaneseTokenizer,
)
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class _SCREAMING_SNAKE_CASE ( A__ , unittest.TestCase ):
UpperCAmelCase_ :str = GPTSanJapaneseTokenizer
UpperCAmelCase_ :Optional[int] = False
UpperCAmelCase_ :Optional[int] = {"do_clean_text": False, "add_prefix_space": False}
def __lowerCAmelCase ( self ) -> Tuple:
super().setUp()
# fmt: off
lowerCAmelCase_ :Dict = ["""こん""", """こんに""", """にちは""", """ばんは""", """世界,㔺界""", """、""", """。""", """<BR>""", """<SP>""", """<TAB>""", """<URL>""", """<EMAIL>""", """<TEL>""", """<DATE>""", """<PRICE>""", """<BLOCK>""", """<KIGOU>""", """<U2000U2BFF>""", """<|emoji1|>""", """<unk>""", """<|bagoftoken|>""", """<|endoftext|>"""]
# fmt: on
lowerCAmelCase_ :List[str] = {"""emoji""": {"""\ud83d\ude00""": """<|emoji1|>"""}, """emoji_inv""": {"""<|emoji1|>""": """\ud83d\ude00"""}} # 😀
lowerCAmelCase_ :int = {"""unk_token""": """<unk>"""}
lowerCAmelCase_ :Optional[int] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] )
lowerCAmelCase_ :int = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""emoji_file"""] )
with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as vocab_writer:
vocab_writer.write("""""".join([x + """\n""" for x in vocab_tokens] ) )
with open(self.emoji_file , """w""" ) as emoji_writer:
emoji_writer.write(json.dumps(__A ) )
def __lowerCAmelCase ( self , **__A ) -> int:
kwargs.update(self.special_tokens_map )
return GPTSanJapaneseTokenizer.from_pretrained(self.tmpdirname , **__A )
def __lowerCAmelCase ( self , __A ) -> Dict:
lowerCAmelCase_ :List[Any] = """こんにちは、世界。 \nこんばんは、㔺界。😀"""
lowerCAmelCase_ :Optional[int] = """こんにちは、世界。 \nこんばんは、世界。😀"""
return input_text, output_text
def __lowerCAmelCase ( self , __A ) -> str:
lowerCAmelCase_ , lowerCAmelCase_ :Tuple = self.get_input_output_texts(__A )
lowerCAmelCase_ :List[str] = tokenizer.encode(__A , add_special_tokens=__A )
lowerCAmelCase_ :str = tokenizer.decode(__A , clean_up_tokenization_spaces=__A )
return text, ids
def __lowerCAmelCase ( self ) -> str:
pass # TODO add if relevant
def __lowerCAmelCase ( self ) -> Dict:
pass # TODO add if relevant
def __lowerCAmelCase ( self ) -> int:
pass # TODO add if relevant
def __lowerCAmelCase ( self ) -> Dict:
lowerCAmelCase_ :Union[str, Any] = self.get_tokenizer()
# Testing tokenization
lowerCAmelCase_ :Optional[int] = """こんにちは、世界。 こんばんは、㔺界。"""
lowerCAmelCase_ :Any = ["""こん""", """にちは""", """、""", """世界""", """。""", """<SP>""", """こん""", """ばんは""", """、""", """㔺界""", """。"""]
lowerCAmelCase_ :Tuple = tokenizer.tokenize(__A )
self.assertListEqual(__A , __A )
# Testing conversion to ids without special tokens
lowerCAmelCase_ :List[Any] = [0, 2, 5, 4, 6, 8, 0, 3, 5, 4, 6]
lowerCAmelCase_ :List[str] = tokenizer.convert_tokens_to_ids(__A )
self.assertListEqual(__A , __A )
# Testing conversion to ids with special tokens
lowerCAmelCase_ :Any = tokens + [tokenizer.unk_token]
lowerCAmelCase_ :Union[str, Any] = [0, 2, 5, 4, 6, 8, 0, 3, 5, 4, 6, 19]
lowerCAmelCase_ :Union[str, Any] = tokenizer.convert_tokens_to_ids(__A )
self.assertListEqual(__A , __A )
def __lowerCAmelCase ( self ) -> Union[str, Any]:
lowerCAmelCase_ :int = self.get_tokenizer()
# Testing tokenization
lowerCAmelCase_ :Optional[int] = """こんにちは、<|bagoftoken|>世界。こんばんは、<|bagoftoken|>㔺界。"""
lowerCAmelCase_ :str = """こんにちは、、、、世界。こんばんは、、、、世界。"""
lowerCAmelCase_ :str = tokenizer.encode(__A )
lowerCAmelCase_ :Dict = tokenizer.decode(__A )
self.assertEqual(__A , __A )
@slow
def __lowerCAmelCase ( self ) -> int:
lowerCAmelCase_ :Tuple = self.tokenizer_class.from_pretrained("""Tanrei/GPTSAN-japanese""" )
# Testing tokenization
lowerCAmelCase_ :Optional[Any] = """こんにちは、世界。"""
lowerCAmelCase_ :Any = """こんばんは、㔺界。😀"""
lowerCAmelCase_ :Optional[Any] = """こんにちは、世界。こんばんは、世界。😀"""
lowerCAmelCase_ :List[Any] = tokenizer.encode(prefix_text + input_text )
lowerCAmelCase_ :List[str] = tokenizer.encode("""""" , prefix_text=prefix_text + input_text )
lowerCAmelCase_ :int = tokenizer.encode(__A , prefix_text=__A )
lowerCAmelCase_ :int = tokenizer.decode(__A )
lowerCAmelCase_ :Dict = tokenizer.decode(__A )
lowerCAmelCase_ :Tuple = tokenizer.decode(__A )
self.assertEqual(__A , __A )
self.assertEqual(__A , __A )
self.assertEqual(__A , __A )
@slow
def __lowerCAmelCase ( self ) -> Any:
lowerCAmelCase_ :int = self.tokenizer_class.from_pretrained("""Tanrei/GPTSAN-japanese""" )
# Testing tokenization
lowerCAmelCase_ :List[Any] = """こんにちは、世界。"""
lowerCAmelCase_ :Optional[int] = """こんばんは、㔺界。😀"""
lowerCAmelCase_ :List[str] = len(tokenizer.encode(__A ) ) - 2
lowerCAmelCase_ :Dict = len(tokenizer.encode(__A ) ) - 2
lowerCAmelCase_ :int = [1] + [0] * (len_prefix + len_text + 1)
lowerCAmelCase_ :List[Any] = [1] * (len_prefix + len_text + 1) + [0]
lowerCAmelCase_ :Dict = [1] + [1] * (len_prefix) + [0] * (len_text + 1)
lowerCAmelCase_ :List[Any] = tokenizer(prefix_text + input_text ).token_type_ids
lowerCAmelCase_ :List[str] = tokenizer("""""" , prefix_text=prefix_text + input_text ).token_type_ids
lowerCAmelCase_ :List[Any] = tokenizer(__A , prefix_text=__A ).token_type_ids
self.assertListEqual(__A , __A )
self.assertListEqual(__A , __A )
self.assertListEqual(__A , __A )
@slow
def __lowerCAmelCase ( self ) -> int:
lowerCAmelCase_ :Dict = self.tokenizer_class.from_pretrained("""Tanrei/GPTSAN-japanese""" )
lowerCAmelCase_ :int = tokenizer.encode("""あンいワ""" )
lowerCAmelCase_ :Optional[Any] = tokenizer.encode("""""" , prefix_text="""あンいワ""" )
lowerCAmelCase_ :int = tokenizer.encode("""いワ""" , prefix_text="""あン""" )
self.assertEqual(tokenizer.decode(__A ) , tokenizer.decode(__A ) )
self.assertEqual(tokenizer.decode(__A ) , tokenizer.decode(__A ) )
self.assertNotEqual(__A , __A )
self.assertNotEqual(__A , __A )
self.assertEqual(x_token_a[1] , x_token_a[-1] ) # SEG token
self.assertEqual(x_token_a[1] , x_token_a[3] ) # SEG token
@slow
def __lowerCAmelCase ( self ) -> Optional[Any]:
lowerCAmelCase_ :Union[str, Any] = self.tokenizer_class.from_pretrained("""Tanrei/GPTSAN-japanese""" )
lowerCAmelCase_ :int = [["""武田信玄""", """は、"""], ["""織田信長""", """の配下の、"""]]
lowerCAmelCase_ :Dict = tokenizer(__A , padding=__A )
lowerCAmelCase_ :Any = tokenizer.batch_encode_plus(__A , padding=__A )
# fmt: off
lowerCAmelCase_ :int = [[3_5993, 8640, 2_5948, 3_5998, 3_0647, 3_5675, 3_5999, 3_5999], [3_5993, 1_0382, 9868, 3_5998, 3_0646, 9459, 3_0646, 3_5675]]
lowerCAmelCase_ :List[str] = [[1, 1, 1, 0, 0, 0, 0, 0], [1, 1, 1, 0, 0, 0, 0, 0]]
lowerCAmelCase_ :int = [[1, 1, 1, 1, 1, 1, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1]]
# fmt: on
self.assertListEqual(x_token.input_ids , __A )
self.assertListEqual(x_token.token_type_ids , __A )
self.assertListEqual(x_token.attention_mask , __A )
self.assertListEqual(x_token_a.input_ids , __A )
self.assertListEqual(x_token_a.token_type_ids , __A )
self.assertListEqual(x_token_a.attention_mask , __A )
def __lowerCAmelCase ( self ) -> Tuple:
# Intentionally convert some words to accommodate character fluctuations unique to Japanese
pass
def __lowerCAmelCase ( self ) -> str:
# tokenizer has no padding token
pass
from __future__ import annotations

from scipy.special import comb  # type: ignore


class BezierCurve:
    """Bezier curve over a list of 2D control points."""

    def __init__(self, list_of_points: list[tuple[float, float]]):
        self.list_of_points = list_of_points
        # Degree determines the flexibility of the curve.
        # Degree = 1 will produce a straight line.
        self.degree = len(list_of_points) - 1

    def basis_function(self, t: float) -> list[float]:
        assert 0 <= t <= 1, "Time t must be between 0 and 1."
        output_values: list[float] = []
        for i in range(len(self.list_of_points)):
            # basis function for each i
            output_values.append(
                comb(self.degree, i) * ((1 - t) ** (self.degree - i)) * (t**i)
            )
        # the basis must sum up to 1 for it to produce a valid Bezier curve.
        assert round(sum(output_values), 5) == 1
        return output_values

    def bezier_curve_function(self, t: float) -> tuple[float, float]:
        assert 0 <= t <= 1, "Time t must be between 0 and 1."

        basis_function = self.basis_function(t)
        x = 0.0
        y = 0.0
        for i in range(len(self.list_of_points)):
            # For all points, sum up the product of i-th basis function and i-th point.
            x += basis_function[i] * self.list_of_points[i][0]
            y += basis_function[i] * self.list_of_points[i][1]
        return (x, y)

    def plot_curve(self, step_size: float = 0.01):
        from matplotlib import pyplot as plt  # type: ignore

        to_plot_x: list[float] = []  # x coordinates of points to plot
        to_plot_y: list[float] = []  # y coordinates of points to plot

        t = 0.0
        while t <= 1:
            value = self.bezier_curve_function(t)
            to_plot_x.append(value[0])
            to_plot_y.append(value[1])
            t += step_size

        x = [i[0] for i in self.list_of_points]
        y = [i[1] for i in self.list_of_points]

        plt.plot(
            to_plot_x,
            to_plot_y,
            color="blue",
            label="Curve of Degree " + str(self.degree),
        )
        plt.scatter(x, y, color="red", label="Control Points")
        plt.legend()
        plt.show()


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    BezierCurve([(1, 2), (3, 5)]).plot_curve()  # degree 1
    BezierCurve([(0, 0), (5, 5), (5, 0)]).plot_curve()  # degree 2
    BezierCurve([(0, 0), (5, 5), (5, 0), (2.5, -2.5)]).plot_curve()  # degree 3
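# Worked example (values verified by hand): for control points (1, 2) and
# (3, 5) the degree-1 curve is a straight line, so the point at t = 0.5 is
# the midpoint of the two control points:
#   curve = BezierCurve([(1, 2), (3, 5)])
#   curve.basis_function(0.5)         # [0.5, 0.5]
#   curve.bezier_curve_function(0.5)  # (2.0, 3.5)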
"""Draw the Mandelbrot set as an image, optionally colour-coded by escape distance."""
import colorsys

from PIL import Image  # type: ignore


def get_distance(x: float, y: float, max_step: int) -> float:
    a = x
    b = y
    for step in range(max_step):  # noqa: B007
        a_new = a * a - b * b + x
        b = 2 * a * b + y
        a = a_new
        # divergence happens for all complex number with an absolute value
        # greater than 4
        if a * a + b * b > 4:
            break
    return step / (max_step - 1)


def get_black_and_white_rgb(distance: float) -> tuple:
    if distance == 1:
        return (0, 0, 0)
    else:
        return (255, 255, 255)


def get_color_coded_rgb(distance: float) -> tuple:
    if distance == 1:
        return (0, 0, 0)
    else:
        return tuple(round(i * 255) for i in colorsys.hsv_to_rgb(distance, 1, 1))


def get_image(
    image_width: int = 800,
    image_height: int = 600,
    figure_center_x: float = -0.6,
    figure_center_y: float = 0,
    figure_width: float = 3.2,
    max_step: int = 50,
    use_distance_color_coding: bool = True,
) -> Image.Image:
    img = Image.new("RGB", (image_width, image_height))
    pixels = img.load()

    # loop through the image-coordinates
    for image_x in range(image_width):
        for image_y in range(image_height):
            # determine the figure-coordinates based on the image-coordinates
            figure_height = figure_width / image_width * image_height
            figure_x = figure_center_x + (image_x / image_width - 0.5) * figure_width
            figure_y = figure_center_y + (image_y / image_height - 0.5) * figure_height

            distance = get_distance(figure_x, figure_y, max_step)

            # color the corresponding pixel based on the selected coloring-function
            if use_distance_color_coding:
                pixels[image_x, image_y] = get_color_coded_rgb(distance)
            else:
                pixels[image_x, image_y] = get_black_and_white_rgb(distance)

    return img


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    # colored version, full figure
    img = get_image()

    # uncomment for colored version, different section, zoomed in
    # img = get_image(figure_center_x=-0.6, figure_center_y=-0.4,
    #                 figure_width=0.8)

    # uncomment for black and white version, full figure
    # img = get_image(use_distance_color_coding=False)

    # uncomment to save the image
    # img.save("mandelbrot.png")

    img.show()
import inspect
import unittest
import numpy as np
from tests.test_modeling_common import floats_tensor
from transformers import DetrConfig, MaskFormerConfig, SwinConfig, is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MaskFormerForInstanceSegmentation, MaskFormerModel
if is_vision_available():
from transformers import MaskFormerImageProcessor
if is_vision_available():
from PIL import Image
class MaskFormerModelTester:
    def __init__(
        self,
        parent,
        batch_size=2,
        is_training=True,
        use_auxiliary_loss=False,
        num_queries=10,
        num_channels=3,
        min_size=32 * 4,
        max_size=32 * 6,
        num_labels=4,
        mask_feature_size=32,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.is_training = is_training
        self.use_auxiliary_loss = use_auxiliary_loss
        self.num_queries = num_queries
        self.num_channels = num_channels
        self.min_size = min_size
        self.max_size = max_size
        self.num_labels = num_labels
        self.mask_feature_size = mask_feature_size

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.min_size, self.max_size]).to(
            torch_device
        )
        pixel_mask = torch.ones([self.batch_size, self.min_size, self.max_size], device=torch_device)
        mask_labels = (
            torch.rand([self.batch_size, self.num_labels, self.min_size, self.max_size], device=torch_device) > 0.5
        ).float()
        class_labels = (torch.rand((self.batch_size, self.num_labels), device=torch_device) > 0.5).long()
        config = self.get_config()
        return config, pixel_values, pixel_mask, mask_labels, class_labels

    def get_config(self):
        return MaskFormerConfig.from_backbone_and_decoder_configs(
            backbone_config=SwinConfig(depths=[1, 1, 1, 1]),
            decoder_config=DetrConfig(
                decoder_ffn_dim=128,
                num_queries=self.num_queries,
                decoder_attention_heads=2,
                d_model=self.mask_feature_size,
            ),
            mask_feature_size=self.mask_feature_size,
            fpn_feature_size=self.mask_feature_size,
            num_channels=self.num_channels,
            num_labels=self.num_labels,
        )

    def prepare_config_and_inputs_for_common(self):
        config, pixel_values, pixel_mask, _, _ = self.prepare_config_and_inputs()
        inputs_dict = {"pixel_values": pixel_values, "pixel_mask": pixel_mask}
        return config, inputs_dict

    def check_output_hidden_state(self, output, config):
        encoder_hidden_states = output.encoder_hidden_states
        pixel_decoder_hidden_states = output.pixel_decoder_hidden_states
        transformer_decoder_hidden_states = output.transformer_decoder_hidden_states
        # assertEqual (not assertTrue with two args) so a wrong length actually fails the test
        self.parent.assertEqual(len(encoder_hidden_states), len(config.backbone_config.depths))
        self.parent.assertEqual(len(pixel_decoder_hidden_states), len(config.backbone_config.depths))
        self.parent.assertEqual(len(transformer_decoder_hidden_states), config.decoder_config.decoder_layers)
    def create_and_check_maskformer_model(self, config, pixel_values, pixel_mask, output_hidden_states=False):
        with torch.no_grad():
            model = MaskFormerModel(config=config)
            model.to(torch_device)
            model.eval()
            output = model(pixel_values=pixel_values, pixel_mask=pixel_mask)
            output = model(pixel_values, output_hidden_states=True)
        # the correct shape of output.transformer_decoder_hidden_states ensures the correctness of the
        # encoder and pixel decoder
        self.parent.assertEqual(
            output.transformer_decoder_last_hidden_state.shape,
            (self.batch_size, self.num_queries, self.mask_feature_size),
        )
        # let's ensure the other two hidden states exist
        self.parent.assertTrue(output.pixel_decoder_last_hidden_state is not None)
        self.parent.assertTrue(output.encoder_last_hidden_state is not None)

        if output_hidden_states:
            self.check_output_hidden_state(output, config)

    def create_and_check_maskformer_instance_segmentation_head_model(
        self, config, pixel_values, pixel_mask, mask_labels, class_labels
    ):
        model = MaskFormerForInstanceSegmentation(config=config)
        model.to(torch_device)
        model.eval()

        def comm_check_on_output(result):
            # let's still check that all the required stuff is there
            self.parent.assertTrue(result.transformer_decoder_last_hidden_state is not None)
            self.parent.assertTrue(result.pixel_decoder_last_hidden_state is not None)
            self.parent.assertTrue(result.encoder_last_hidden_state is not None)
            # okay, now we need to check the logits shape
            # due to the encoder compression, masks have a //4 spatial size
            self.parent.assertEqual(
                result.masks_queries_logits.shape,
                (self.batch_size, self.num_queries, self.min_size // 4, self.max_size // 4),
            )
            # + 1 for null class
            self.parent.assertEqual(
                result.class_queries_logits.shape, (self.batch_size, self.num_queries, self.num_labels + 1)
            )

        with torch.no_grad():
            result = model(pixel_values=pixel_values, pixel_mask=pixel_mask)
            result = model(pixel_values)
            comm_check_on_output(result)
            result = model(
                pixel_values=pixel_values, pixel_mask=pixel_mask, mask_labels=mask_labels, class_labels=class_labels
            )
            comm_check_on_output(result)
        self.parent.assertTrue(result.loss is not None)
        self.parent.assertEqual(result.loss.shape, torch.Size([1]))
@require_torch
class MaskFormerModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (MaskFormerModel, MaskFormerForInstanceSegmentation) if is_torch_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": MaskFormerModel, "image-segmentation": MaskFormerForInstanceSegmentation}
        if is_torch_available()
        else {}
    )

    is_encoder_decoder = False
    test_pruning = False
    test_head_masking = False
    test_missing_keys = False

    def setUp(self):
        self.model_tester = MaskFormerModelTester(self)
        self.config_tester = ConfigTester(self, config_class=MaskFormerConfig, has_text_modality=False)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_maskformer_model(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.create_and_check_maskformer_model(config, **inputs_dict, output_hidden_states=False)

    def test_maskformer_instance_segmentation_head_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_maskformer_instance_segmentation_head_model(*config_and_inputs)
    @unittest.skip(reason="MaskFormer does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="MaskFormer does not have a get_input_embeddings method")
    def test_model_common_attributes(self):
        pass

    @unittest.skip(reason="MaskFormer is not a generative model")
    def test_generate_without_input_ids(self):
        pass

    @unittest.skip(reason="MaskFormer does not use token embeddings")
    def test_resize_tokens_embeddings(self):
        pass

    @require_torch_multi_gpu
    @unittest.skip(
        reason="MaskFormer has some layers using `add_module` which doesn't work well with `nn.DataParallel`"
    )
    def test_multi_gpu_data_parallel_forward(self):
        pass

    @unittest.skip("Will be fixed soon by reducing the size of the model used for common tests.")
    def test_model_is_small(self):
        pass
    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    @slow
    def test_model_from_pretrained(self):
        for model_name in ["facebook/maskformer-swin-small-coco"]:
            model = MaskFormerModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
    def test_model_with_labels(self):
        size = (self.model_tester.min_size,) * 2
        inputs = {
            "pixel_values": torch.randn((2, 3, *size), device=torch_device),
            "mask_labels": torch.randn((2, 10, *size), device=torch_device),
            "class_labels": torch.zeros(2, 10, device=torch_device).long(),
        }

        model = MaskFormerForInstanceSegmentation(MaskFormerConfig()).to(torch_device)
        outputs = model(**inputs)
        self.assertTrue(outputs.loss is not None)

    def test_hidden_states_output(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.create_and_check_maskformer_model(config, **inputs_dict, output_hidden_states=True)

    def test_attention_outputs(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config).to(torch_device)
            outputs = model(**inputs_dict, output_attentions=True)
            self.assertTrue(outputs.attentions is not None)

    def test_training(self):
        if not self.model_tester.is_training:
            return
        # only MaskFormerForInstanceSegmentation has the loss
        model_class = self.all_model_classes[1]
        config, pixel_values, pixel_mask, mask_labels, class_labels = self.model_tester.prepare_config_and_inputs()

        model = model_class(config)
        model.to(torch_device)
        model.train()

        loss = model(pixel_values, mask_labels=mask_labels, class_labels=class_labels).loss
        loss.backward()
    def test_retain_grad_hidden_states_attentions(self):
        # only MaskFormerForInstanceSegmentation has the loss
        model_class = self.all_model_classes[1]
        config, pixel_values, pixel_mask, mask_labels, class_labels = self.model_tester.prepare_config_and_inputs()
        config.output_hidden_states = True
        config.output_attentions = True

        model = model_class(config)
        model.to(torch_device)
        model.train()

        outputs = model(pixel_values, mask_labels=mask_labels, class_labels=class_labels)

        encoder_hidden_states = outputs.encoder_hidden_states[0]
        encoder_hidden_states.retain_grad()

        pixel_decoder_hidden_states = outputs.pixel_decoder_hidden_states[0]
        pixel_decoder_hidden_states.retain_grad()
        # we set requires_grad=True on inputs_embeds; the original implementation doesn't
        transformer_decoder_hidden_states = outputs.transformer_decoder_hidden_states[0]
        transformer_decoder_hidden_states.retain_grad()

        attentions = outputs.attentions[0]
        attentions.retain_grad()

        outputs.loss.backward(retain_graph=True)

        self.assertIsNotNone(encoder_hidden_states.grad)
        self.assertIsNotNone(pixel_decoder_hidden_states.grad)
        self.assertIsNotNone(transformer_decoder_hidden_states.grad)
        self.assertIsNotNone(attentions.grad)
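
# Background sketch (illustration only, not part of the test above): PyTorch
# frees gradients of non-leaf tensors by default, so the assertions above rely
# on retain_grad(). A minimal stand-alone version of the same idea:
#
#   import torch
#   x = torch.randn(3, requires_grad=True)
#   hidden = x * 2           # non-leaf tensor: its grad is normally discarded
#   hidden.retain_grad()     # ask autograd to keep it
#   hidden.sum().backward()
#   assert hidden.grad is not None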
TOLERANCE = 1e-4


def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image


@require_vision
@slow
class MaskFormerModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return (
            MaskFormerImageProcessor.from_pretrained("facebook/maskformer-swin-small-coco")
            if is_vision_available()
            else None
        )

    def test_inference_no_head(self):
        model = MaskFormerModel.from_pretrained("facebook/maskformer-swin-small-coco").to(torch_device)
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(image, return_tensors="pt").to(torch_device)
        inputs_shape = inputs["pixel_values"].shape
        # check size is divisible by 32
        self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0)
        # check size
        self.assertEqual(inputs_shape, (1, 3, 800, 1088))

        with torch.no_grad():
            outputs = model(**inputs)

        expected_slice_hidden_state = torch.tensor(
            [[-0.0482, 0.9228, 0.4951], [-0.2547, 0.8017, 0.8527], [-0.0069, 0.3385, -0.0089]]
        ).to(torch_device)
        self.assertTrue(
            torch.allclose(
                outputs.encoder_last_hidden_state[0, 0, :3, :3], expected_slice_hidden_state, atol=TOLERANCE
            )
        )

        expected_slice_hidden_state = torch.tensor(
            [[-0.8422, -0.8434, -0.9718], [-1.0144, -0.5565, -0.4195], [-1.0038, -0.4484, -0.1961]]
        ).to(torch_device)
        self.assertTrue(
            torch.allclose(
                outputs.pixel_decoder_last_hidden_state[0, 0, :3, :3], expected_slice_hidden_state, atol=TOLERANCE
            )
        )

        expected_slice_hidden_state = torch.tensor(
            [[0.2852, -0.0159, 0.9735], [0.6254, 0.1858, 0.8529], [-0.0680, -0.4116, 1.8413]]
        ).to(torch_device)
        self.assertTrue(
            torch.allclose(
                outputs.transformer_decoder_last_hidden_state[0, :3, :3], expected_slice_hidden_state, atol=TOLERANCE
            )
        )
    def test_inference_instance_segmentation_head(self):
        model = (
            MaskFormerForInstanceSegmentation.from_pretrained("facebook/maskformer-swin-small-coco")
            .to(torch_device)
            .eval()
        )
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(image, return_tensors="pt").to(torch_device)
        inputs_shape = inputs["pixel_values"].shape
        # check size is divisible by 32
        self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0)
        # check size
        self.assertEqual(inputs_shape, (1, 3, 800, 1088))

        with torch.no_grad():
            outputs = model(**inputs)

        # masks_queries_logits
        masks_queries_logits = outputs.masks_queries_logits
        self.assertEqual(
            masks_queries_logits.shape,
            (1, model.config.decoder_config.num_queries, inputs_shape[-2] // 4, inputs_shape[-1] // 4),
        )
        expected_slice = [
            [-1.3737124, -1.7724937, -1.9364233],
            [-1.5977281, -1.9867939, -2.1523695],
            [-1.5795398, -1.9269832, -2.093942],
        ]
        expected_slice = torch.tensor(expected_slice).to(torch_device)
        self.assertTrue(torch.allclose(masks_queries_logits[0, 0, :3, :3], expected_slice, atol=TOLERANCE))

        # class_queries_logits
        class_queries_logits = outputs.class_queries_logits
        self.assertEqual(
            class_queries_logits.shape, (1, model.config.decoder_config.num_queries, model.config.num_labels + 1)
        )
        expected_slice = torch.tensor(
            [
                [1.6512e00, -5.2572e00, -3.3519e00],
                [3.6169e-02, -5.9025e00, -2.9313e00],
                [1.0766e-04, -7.7630e00, -5.1263e00],
            ]
        ).to(torch_device)
        self.assertTrue(torch.allclose(outputs.class_queries_logits[0, :3, :3], expected_slice, atol=TOLERANCE))
    def test_inference_instance_segmentation_head_resnet_backbone(self):
        model = (
            MaskFormerForInstanceSegmentation.from_pretrained("facebook/maskformer-resnet101-coco-stuff")
            .to(torch_device)
            .eval()
        )
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(image, return_tensors="pt").to(torch_device)
        inputs_shape = inputs["pixel_values"].shape
        # check size is divisible by 32
        self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0)
        # check size
        self.assertEqual(inputs_shape, (1, 3, 800, 1088))

        with torch.no_grad():
            outputs = model(**inputs)

        # masks_queries_logits
        masks_queries_logits = outputs.masks_queries_logits
        self.assertEqual(
            masks_queries_logits.shape,
            (1, model.config.decoder_config.num_queries, inputs_shape[-2] // 4, inputs_shape[-1] // 4),
        )
        expected_slice = [[-0.9046, -2.6366, -4.6062], [-3.4179, -5.7890, -8.8057], [-4.9179, -7.6560, -10.7711]]
        expected_slice = torch.tensor(expected_slice).to(torch_device)
        self.assertTrue(torch.allclose(masks_queries_logits[0, 0, :3, :3], expected_slice, atol=TOLERANCE))

        # class_queries_logits
        class_queries_logits = outputs.class_queries_logits
        self.assertEqual(
            class_queries_logits.shape, (1, model.config.decoder_config.num_queries, model.config.num_labels + 1)
        )
        expected_slice = torch.tensor(
            [[4.7188, -3.2585, -2.8857], [6.6871, -2.9181, -1.2487], [7.2449, -2.2764, -2.1874]]
        ).to(torch_device)
        self.assertTrue(torch.allclose(outputs.class_queries_logits[0, :3, :3], expected_slice, atol=TOLERANCE))
    def test_with_segmentation_maps_and_loss(self):
        model = (
            MaskFormerForInstanceSegmentation.from_pretrained("facebook/maskformer-swin-small-coco")
            .to(torch_device)
            .eval()
        )
        image_processor = self.default_image_processor

        inputs = image_processor(
            [np.zeros((3, 800, 1333)), np.zeros((3, 800, 1333))],
            segmentation_maps=[np.zeros((384, 384)).astype(np.float32), np.zeros((384, 384)).astype(np.float32)],
            return_tensors="pt",
        )

        inputs["pixel_values"] = inputs["pixel_values"].to(torch_device)
        inputs["mask_labels"] = [el.to(torch_device) for el in inputs["mask_labels"]]
        inputs["class_labels"] = [el.to(torch_device) for el in inputs["class_labels"]]

        with torch.no_grad():
            outputs = model(**inputs)

        self.assertTrue(outputs.loss is not None)
| 51
|
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import BeitConfig, BeitForImageClassification, BeitForMaskedImageModeling, BeitImageProcessor
from transformers.image_utils import PILImageResampling
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
# here we list all keys to be renamed (original name on the left, our name on the right)
def create_rename_keys(config, has_lm_head=False, is_semantic=False):
    prefix = "backbone." if is_semantic else ""

    rename_keys = []
    for i in range(config.num_hidden_layers):
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((F"{prefix}blocks.{i}.norm1.weight", F"beit.encoder.layer.{i}.layernorm_before.weight") )
rename_keys.append((F"{prefix}blocks.{i}.norm1.bias", F"beit.encoder.layer.{i}.layernorm_before.bias") )
rename_keys.append(
(F"{prefix}blocks.{i}.attn.proj.weight", F"beit.encoder.layer.{i}.attention.output.dense.weight") )
rename_keys.append(
(F"{prefix}blocks.{i}.attn.proj.bias", F"beit.encoder.layer.{i}.attention.output.dense.bias") )
rename_keys.append((F"{prefix}blocks.{i}.norm2.weight", F"beit.encoder.layer.{i}.layernorm_after.weight") )
rename_keys.append((F"{prefix}blocks.{i}.norm2.bias", F"beit.encoder.layer.{i}.layernorm_after.bias") )
rename_keys.append((F"{prefix}blocks.{i}.mlp.fc1.weight", F"beit.encoder.layer.{i}.intermediate.dense.weight") )
rename_keys.append((F"{prefix}blocks.{i}.mlp.fc1.bias", F"beit.encoder.layer.{i}.intermediate.dense.bias") )
rename_keys.append((F"{prefix}blocks.{i}.mlp.fc2.weight", F"beit.encoder.layer.{i}.output.dense.weight") )
rename_keys.append((F"{prefix}blocks.{i}.mlp.fc2.bias", F"beit.encoder.layer.{i}.output.dense.bias") )
# projection layer + position embeddings
rename_keys.extend(
[
(F"{prefix}cls_token", '''beit.embeddings.cls_token'''),
(F"{prefix}patch_embed.proj.weight", '''beit.embeddings.patch_embeddings.projection.weight'''),
(F"{prefix}patch_embed.proj.bias", '''beit.embeddings.patch_embeddings.projection.bias'''),
(F"{prefix}pos_embed", '''beit.embeddings.position_embeddings'''),
] )
if has_lm_head:
# mask token + layernorm
rename_keys.extend(
[
('''mask_token''', '''beit.embeddings.mask_token'''),
('''norm.weight''', '''layernorm.weight'''),
('''norm.bias''', '''layernorm.bias'''),
] )
else:
# layernorm + classification head
rename_keys.extend(
[
('''fc_norm.weight''', '''beit.pooler.layernorm.weight'''),
('''fc_norm.bias''', '''beit.pooler.layernorm.bias'''),
('''head.weight''', '''classifier.weight'''),
('''head.bias''', '''classifier.bias'''),
] )
return rename_keys
def read_in_q_k_v(state_dict, config, has_lm_head=False, is_semantic=False):
    for i in range(config.num_hidden_layers):
        prefix = "backbone." if is_semantic else ""
        # queries, keys and values
        in_proj_weight = state_dict.pop(f"{prefix}blocks.{i}.attn.qkv.weight")
        q_bias = state_dict.pop(f"{prefix}blocks.{i}.attn.q_bias")
        v_bias = state_dict.pop(f"{prefix}blocks.{i}.attn.v_bias")

        state_dict[f"beit.encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[
            : config.hidden_size, :
        ]
        state_dict[f"beit.encoder.layer.{i}.attention.attention.query.bias"] = q_bias
        state_dict[f"beit.encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[f"beit.encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[f"beit.encoder.layer.{i}.attention.attention.value.bias"] = v_bias

        # gamma_1 and gamma_2
        # we call them lambda because otherwise they are renamed when using .from_pretrained
        gamma_1 = state_dict.pop(f"{prefix}blocks.{i}.gamma_1")
        gamma_2 = state_dict.pop(f"{prefix}blocks.{i}.gamma_2")
        state_dict[f"beit.encoder.layer.{i}.lambda_1"] = gamma_1
        state_dict[f"beit.encoder.layer.{i}.lambda_2"] = gamma_2
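
# Illustrative sketch (not used by the converter): why the slicing in
# read_in_q_k_v works. A fused qkv projection stacks the query, key and value
# weight matrices along dim 0, so each one is recovered with a single
# contiguous slice. The helper name is hypothetical.
def _example_split_fused_qkv(qkv_weight, hidden_size):
    """Split a fused [q; k; v] weight of shape (3 * hidden_size, hidden_size)."""
    q_weight = qkv_weight[:hidden_size, :]
    k_weight = qkv_weight[hidden_size : hidden_size * 2, :]
    v_weight = qkv_weight[-hidden_size:, :]
    return q_weight, k_weight, v_weight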
def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val


def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_dit_checkpoint(checkpoint_url, pytorch_dump_folder_path, push_to_hub=False):
    has_lm_head = False if "rvlcdip" in checkpoint_url else True
    config = BeitConfig(use_absolute_position_embeddings=True, use_mask_token=has_lm_head)

    # size of the architecture
    if "large" in checkpoint_url or "dit-l" in checkpoint_url:
        config.hidden_size = 1024
        config.intermediate_size = 4096
        config.num_hidden_layers = 24
        config.num_attention_heads = 16

    # labels
    if "rvlcdip" in checkpoint_url:
        config.num_labels = 16
        repo_id = "huggingface/label-files"
        filename = "rvlcdip-id2label.json"
        id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
        id2label = {int(k): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}

    # load state_dict of original model, remove and rename some keys
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")["model"]

    rename_keys = create_rename_keys(config, has_lm_head=has_lm_head)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_q_k_v(state_dict, config, has_lm_head=has_lm_head)

    # load HuggingFace model
    model = BeitForMaskedImageModeling(config) if has_lm_head else BeitForImageClassification(config)
    model.eval()
    model.load_state_dict(state_dict)

    # Check outputs on an image
    image_processor = BeitImageProcessor(
        size=config.image_size, resample=PILImageResampling.BILINEAR, do_center_crop=False
    )
    image = prepare_img()
    encoding = image_processor(images=image, return_tensors="pt")
    pixel_values = encoding["pixel_values"]

    outputs = model(pixel_values)
    logits = outputs.logits

    # verify logits
    expected_shape = [1, 16] if "rvlcdip" in checkpoint_url else [1, 196, 8192]
    assert logits.shape == torch.Size(expected_shape), "Shape of logits not as expected"

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f"Saving model to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        if has_lm_head:
            model_name = "dit-base" if "base" in checkpoint_url else "dit-large"
        else:
            model_name = "dit-base-finetuned-rvlcdip" if "dit-b" in checkpoint_url else "dit-large-finetuned-rvlcdip"
        image_processor.push_to_hub(
            repo_path_or_name=Path(pytorch_dump_folder_path, model_name),
            organization="nielsr",
            commit_message="Add image processor",
            use_temp_dir=True,
        )
        model.push_to_hub(
            repo_path_or_name=Path(pytorch_dump_folder_path, model_name),
            organization="nielsr",
            commit_message="Add model",
            use_temp_dir=True,
        )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
"--checkpoint_url",
default="https://layoutlm.blob.core.windows.net/dit/dit-pts/dit-base-224-p16-500k-62d53a.pth",
type=str,
help="URL to the original PyTorch checkpoint (.pth file).",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the folder to output PyTorch model."
)
parser.add_argument(
"--push_to_hub",
action="store_true",
)
    args = parser.parse_args()
convert_dit_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub)
| 51
| 1
|
"""simple docstring"""
import unittest
from transformers import load_tool
from transformers.utils import is_torch_available
if is_torch_available():
import torch
from transformers.testing_utils import require_torch
from .test_tools_common import ToolTesterMixin
@require_torch
class TextToSpeechToolTester(unittest.TestCase, ToolTesterMixin):
    def setUp(self):
        self.tool = load_tool("text-to-speech")
        self.tool.setup()

    def test_exact_match_arg(self):
        # SpeechT5 isn't deterministic
        torch.manual_seed(0)
        result = self.tool("hey")
        resulting_tensor = result.to_raw()
        self.assertTrue(
            torch.allclose(
                resulting_tensor[:3],
                torch.tensor([-0.0005966668832115829, -0.0003657640190795064, -0.00013439502799883485]),
            )
        )

    def test_exact_match_kwarg(self):
        # SpeechT5 isn't deterministic
        torch.manual_seed(0)
        result = self.tool("hey")
        resulting_tensor = result.to_raw()
        self.assertTrue(
            torch.allclose(
                resulting_tensor[:3],
                torch.tensor([-0.0005966668832115829, -0.0003657640190795064, -0.00013439502799883485]),
            )
        )
| 106
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
_import_structure = {
'''configuration_clip''': [
'''CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''CLIPConfig''',
'''CLIPOnnxConfig''',
'''CLIPTextConfig''',
'''CLIPVisionConfig''',
],
'''processing_clip''': ['''CLIPProcessor'''],
'''tokenization_clip''': ['''CLIPTokenizer'''],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_clip_fast"] = ["CLIPTokenizerFast"]
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["feature_extraction_clip"] = ["CLIPFeatureExtractor"]
    _import_structure["image_processing_clip"] = ["CLIPImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_clip"] = [
'''CLIP_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''CLIPModel''',
'''CLIPPreTrainedModel''',
'''CLIPTextModel''',
'''CLIPTextModelWithProjection''',
'''CLIPVisionModel''',
'''CLIPVisionModelWithProjection''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_clip"] = [
'''TF_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFCLIPModel''',
'''TFCLIPPreTrainedModel''',
'''TFCLIPTextModel''',
'''TFCLIPVisionModel''',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_clip"] = [
'''FlaxCLIPModel''',
'''FlaxCLIPPreTrainedModel''',
'''FlaxCLIPTextModel''',
'''FlaxCLIPTextPreTrainedModel''',
'''FlaxCLIPVisionModel''',
'''FlaxCLIPVisionPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_clip import (
CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
CLIPConfig,
CLIPOnnxConfig,
CLIPTextConfig,
CLIPVisionConfig,
)
from .processing_clip import CLIPProcessor
from .tokenization_clip import CLIPTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_clip_fast import CLIPTokenizerFast
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_clip import CLIPFeatureExtractor
from .image_processing_clip import CLIPImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_clip import (
CLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
CLIPModel,
CLIPPreTrainedModel,
CLIPTextModel,
CLIPTextModelWithProjection,
CLIPVisionModel,
CLIPVisionModelWithProjection,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_clip import (
TF_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
TFCLIPModel,
TFCLIPPreTrainedModel,
TFCLIPTextModel,
TFCLIPVisionModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_clip import (
FlaxCLIPModel,
FlaxCLIPPreTrainedModel,
FlaxCLIPTextModel,
FlaxCLIPTextPreTrainedModel,
FlaxCLIPVisionModel,
FlaxCLIPVisionPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
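
# Background sketch (illustration only; names are hypothetical, not this
# module's API): the lazy-module pattern above defers heavy imports until an
# attribute is first accessed. A minimal stand-alone version looks roughly like:
#
#   import importlib, types
#
#   class LazyModule(types.ModuleType):
#       def __init__(self, name, import_structure):
#           super().__init__(name)
#           self._import_structure = import_structure
#
#       def __getattr__(self, attr):
#           for submodule, names in self._import_structure.items():
#               if attr in names:
#                   module = importlib.import_module(f"{self.__name__}.{submodule}")
#                   return getattr(module, attr)
#           raise AttributeError(attr)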
| 106
| 1
|
def solution(numerator: int = 1, digit: int = 1000) -> int:
    """
    Find the denominator d, with numerator/d for d up to `digit`, whose decimal
    fraction part contains the longest recurring cycle (Project Euler problem 26).

    >>> solution(1, 10)
    7
    """
    the_digit = 1
    longest_list_length = 0

    for divide_by_number in range(numerator, digit + 1):
        has_been_divided: list = []
        now_divide = numerator
        for _ in range(1, digit + 1):
            if now_divide in has_been_divided:
                if longest_list_length < len(has_been_divided):
                    longest_list_length = len(has_been_divided)
                    the_digit = divide_by_number
            else:
                has_been_divided.append(now_divide)
                now_divide = now_divide * 10 % divide_by_number

    return the_digit
# Tests
if __name__ == "__main__":
import doctest
doctest.testmod()
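
# Illustrative alternative (not part of the solution above): for gcd(10, d) == 1,
# the recurring-cycle length of 1/d equals the multiplicative order of 10 mod d.
# A direct number-theoretic sketch of that fact:
def cycle_length(d: int) -> int:
    """Length of the recurring cycle of 1/d; 0 if the expansion terminates."""
    while d % 2 == 0:  # factors of 2 and 5 only shift the decimal point
        d //= 2
    while d % 5 == 0:
        d //= 5
    if d == 1:
        return 0
    order, remainder = 1, 10 % d
    while remainder != 1:
        remainder = remainder * 10 % d
        order += 1
    return order


assert cycle_length(7) == 6  # 1/7 = 0.(142857)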
| 363
|
'''simple docstring'''
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from diffusers import (
    DDIMScheduler,
    KandinskyV22InpaintPipeline,
    KandinskyV22PriorPipeline,
    UNet2DConditionModel,
    VQModel,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class KandinskyV22InpaintPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = KandinskyV22InpaintPipeline
    params = ["image_embeds", "negative_image_embeds", "image", "mask_image"]
    batch_params = [
        "image_embeds",
        "negative_image_embeds",
        "image",
        "mask_image",
    ]
    required_optional_params = [
        "generator",
        "height",
        "width",
        "latents",
        "guidance_scale",
        "num_inference_steps",
        "return_dict",
        "guidance_scale",
        "num_images_per_prompt",
        "output_type",
        "return_dict",
    ]
    test_xformers_attention = False

    @property
    def text_embedder_hidden_size(self):
        return 32

    @property
    def time_input_dim(self):
        return 32

    @property
    def block_out_channels_0(self):
        return self.time_input_dim

    @property
    def time_embed_dim(self):
        return self.time_input_dim * 4

    @property
    def cross_attention_dim(self):
        return 100
    @property
    def dummy_unet(self):
        torch.manual_seed(0)

        model_kwargs = {
            "in_channels": 9,
            # Out channels is double in channels because predicts mean and variance
            "out_channels": 8,
            "addition_embed_type": "image",
            "down_block_types": ("ResnetDownsampleBlock2D", "SimpleCrossAttnDownBlock2D"),
            "up_block_types": ("SimpleCrossAttnUpBlock2D", "ResnetUpsampleBlock2D"),
            "mid_block_type": "UNetMidBlock2DSimpleCrossAttn",
            "block_out_channels": (self.block_out_channels_0, self.block_out_channels_0 * 2),
            "layers_per_block": 1,
            "encoder_hid_dim": self.text_embedder_hidden_size,
            "encoder_hid_dim_type": "image_proj",
            "cross_attention_dim": self.cross_attention_dim,
            "attention_head_dim": 4,
            "resnet_time_scale_shift": "scale_shift",
            "class_embed_type": None,
        }

        model = UNet2DConditionModel(**model_kwargs)
        return model
@property
    def dummy_movq_kwargs(self):
        return {
"block_out_channels": [32, 64],
"down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"],
"in_channels": 3,
"latent_channels": 4,
"layers_per_block": 1,
"norm_num_groups": 8,
"norm_type": "spatial",
"num_vq_embeddings": 12,
"out_channels": 3,
"up_block_types": [
"AttnUpDecoderBlock2D",
"UpDecoderBlock2D",
],
"vq_embed_dim": 4,
}
    @property
    def dummy_movq(self):
        torch.manual_seed(0)
        model = VQModel(**self.dummy_movq_kwargs)
        return model
    def get_dummy_components(self):
        unet = self.dummy_unet
        movq = self.dummy_movq

        scheduler = DDIMScheduler(
            num_train_timesteps=1000,
            beta_schedule="linear",
            beta_start=0.00085,
            beta_end=0.012,
            clip_sample=False,
            set_alpha_to_one=False,
            steps_offset=1,
            prediction_type="epsilon",
            thresholding=False,
        )

        components = {
            "unet": unet,
            "scheduler": scheduler,
            "movq": movq,
        }
        return components
    def get_dummy_inputs(self, device, seed=0):
        image_embeds = floats_tensor((1, self.text_embedder_hidden_size), rng=random.Random(seed)).to(device)
        negative_image_embeds = floats_tensor((1, self.text_embedder_hidden_size), rng=random.Random(seed + 1)).to(
            device
        )
        # create init_image
        image = floats_tensor((1, 3, 64, 64), rng=random.Random(seed)).to(device)
        image = image.cpu().permute(0, 2, 3, 1)[0]
        init_image = Image.fromarray(np.uint8(image)).convert("RGB").resize((256, 256))
        # create mask
        mask = np.ones((64, 64), dtype=np.float32)
        mask[:32, :32] = 0

        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "image": init_image,
            "mask_image": mask,
            "image_embeds": image_embeds,
            "negative_image_embeds": negative_image_embeds,
            "generator": generator,
            "height": 64,
            "width": 64,
            "num_inference_steps": 2,
            "guidance_scale": 4.0,
            "output_type": "np",
        }
        return inputs
    def test_kandinsky_inpaint(self):
        device = "cpu"

        components = self.get_dummy_components()

        pipe = self.pipeline_class(**components)
        pipe = pipe.to(device)

        pipe.set_progress_bar_config(disable=None)

        output = pipe(**self.get_dummy_inputs(device))
        image = output.images

        image_from_tuple = pipe(
            **self.get_dummy_inputs(device),
            return_dict=False,
        )[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        print(f"image.shape {image.shape}")

        assert image.shape == (1, 64, 64, 3)

        expected_slice = np.array(
            [0.50775903, 0.49527195, 0.48824543, 0.50192237, 0.48644906, 0.49373814, 0.4780598, 0.47234827, 0.48327848]
        )

        assert (
            np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        ), f" expected_slice {expected_slice}, but got {image_slice.flatten()}"
        assert (
            np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
        ), f" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}"

    def test_inference_batch_single_identical(self):
        super().test_inference_batch_single_identical(expected_max_diff=3e-3)
@slow
@require_torch_gpu
class KandinskyV22InpaintPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_kandinsky_inpaint(self):
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/kandinskyv22/kandinskyv22_inpaint_cat_with_hat_fp16.npy"
        )

        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/kandinsky/cat.png"
        )
        mask = np.ones((768, 768), dtype=np.float32)
        mask[:250, 250:-250] = 0

        prompt = "a hat"

        pipe_prior = KandinskyV22PriorPipeline.from_pretrained(
            "kandinsky-community/kandinsky-2-2-prior", torch_dtype=torch.float16
        )
        pipe_prior.to(torch_device)

        pipeline = KandinskyV22InpaintPipeline.from_pretrained(
            "kandinsky-community/kandinsky-2-2-decoder-inpaint", torch_dtype=torch.float16
        )
        pipeline = pipeline.to(torch_device)
        pipeline.set_progress_bar_config(disable=None)

        generator = torch.Generator(device="cpu").manual_seed(0)
        image_emb, zero_image_emb = pipe_prior(
            prompt,
            generator=generator,
            num_inference_steps=5,
            negative_prompt="",
        ).to_tuple()

        output = pipeline(
            image=init_image,
            mask_image=mask,
            image_embeds=image_emb,
            negative_image_embeds=zero_image_emb,
            generator=generator,
            num_inference_steps=100,
            height=768,
            width=768,
            output_type="np",
        )

        image = output.images[0]

        assert image.shape == (768, 768, 3)

        assert_mean_pixel_difference(image, expected_image)
| 46
| 0
|
import argparse
import json
import os
import fairseq
import torch
from fairseq.data import Dictionary
from transformers import (
    HubertConfig,
    HubertForCTC,
    HubertModel,
    Wav2Vec2CTCTokenizer,
    Wav2Vec2FeatureExtractor,
    Wav2Vec2Processor,
    logging,
)
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

MAPPING = {
"post_extract_proj": "feature_projection.projection",
"encoder.pos_conv.0": "encoder.pos_conv_embed.conv",
"self_attn.k_proj": "encoder.layers.*.attention.k_proj",
"self_attn.v_proj": "encoder.layers.*.attention.v_proj",
"self_attn.q_proj": "encoder.layers.*.attention.q_proj",
"self_attn.out_proj": "encoder.layers.*.attention.out_proj",
"self_attn_layer_norm": "encoder.layers.*.layer_norm",
"fc1": "encoder.layers.*.feed_forward.intermediate_dense",
"fc2": "encoder.layers.*.feed_forward.output_dense",
"final_layer_norm": "encoder.layers.*.final_layer_norm",
"encoder.layer_norm": "encoder.layer_norm",
"w2v_model.layer_norm": "feature_projection.layer_norm",
"w2v_encoder.proj": "lm_head",
"mask_emb": "masked_spec_embed",
}
def set_recursively(hf_pointer, key, value, full_name, weight_type):
    for attribute in key.split("."):
        hf_pointer = getattr(hf_pointer, attribute)

    if weight_type is not None:
        hf_shape = getattr(hf_pointer, weight_type).shape
    else:
        hf_shape = hf_pointer.shape

    assert hf_shape == value.shape, (
        f"Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"
        f" {value.shape} for {full_name}"
    )

    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    else:
        hf_pointer.data = value

    logger.info(f"{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.")
def recursively_load_weights(fairseq_model, hf_model, is_finetuned):
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()

    feature_extractor = hf_model.hubert.feature_extractor if is_finetuned else hf_model.feature_extractor

    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name,
                value,
                feature_extractor,
                unused_weights,
                hf_model.config.feat_extract_norm == "group",
            )
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                mapped_key = "hubert." + mapped_key if (is_finetuned and mapped_key != "lm_head") else mapped_key

                if key in name or (key.split("w2v_model.")[-1] == name.split(".")[0] and not is_finetuned):
                    is_used = True
                    if "*" in mapped_key:
                        layer_index = name.split(key)[0].split(".")[-2]
                        mapped_key = mapped_key.replace("*", layer_index)
                    if "weight_g" in name:
                        weight_type = "weight_g"
                    elif "weight_v" in name:
                        weight_type = "weight_v"
                    elif "weight" in name:
                        weight_type = "weight"
                    elif "bias" in name:
                        weight_type = "bias"
                    else:
                        weight_type = None
                    set_recursively(hf_model, mapped_key, value, name, weight_type)
                continue
        if not is_used:
            unused_weights.append(name)

    logger.warning(f"Unused weights: {unused_weights}")
def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm):
    name = full_name.split("conv_layers.")[-1]
    items = name.split(".")
    layer_id = int(items[0])
    type_id = int(items[1])

    if type_id == 0:
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(f"Feat extract layer norm bias of layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
    else:
        unused_weights.append(full_name)
@torch.no_grad()
def convert_hubert_checkpoint(
    checkpoint_path, pytorch_dump_folder_path, config_path=None, dict_path=None, is_finetuned=True
):
    if config_path is not None:
        config = HubertConfig.from_pretrained(config_path)
    else:
        config = HubertConfig()

    if is_finetuned:
        if dict_path:
            target_dict = Dictionary.load(dict_path)

            # important change bos & pad token id since CTC symbol is <pad> and
            # not <s> as in fairseq
            config.bos_token_id = target_dict.pad_index
            config.pad_token_id = target_dict.bos_index
            config.eos_token_id = target_dict.eos_index
            config.vocab_size = len(target_dict.symbols)
            vocab_path = os.path.join(pytorch_dump_folder_path, "vocab.json")
            if not os.path.isdir(pytorch_dump_folder_path):
                logger.error("--pytorch_dump_folder_path ({}) should be a directory".format(pytorch_dump_folder_path))
                return
            os.makedirs(pytorch_dump_folder_path, exist_ok=True)
            with open(vocab_path, "w", encoding="utf-8") as vocab_handle:
                json.dump(target_dict.indices, vocab_handle)
            tokenizer = Wav2Vec2CTCTokenizer(
                vocab_path,
                unk_token=target_dict.unk_word,
                pad_token=target_dict.pad_word,
                bos_token=target_dict.bos_word,
                eos_token=target_dict.eos_word,
                word_delimiter_token="|",
                do_lower_case=False,
            )
            return_attention_mask = True if config.feat_extract_norm == "layer" else False
            feature_extractor = Wav2Vec2FeatureExtractor(
                feature_size=1,
                sampling_rate=16000,
                padding_value=0,
                do_normalize=True,
                return_attention_mask=return_attention_mask,
            )
            processor = Wav2Vec2Processor(feature_extractor=feature_extractor, tokenizer=tokenizer)
            processor.save_pretrained(pytorch_dump_folder_path)

        hf_wav2vec = HubertForCTC(config)
    else:
        hf_wav2vec = HubertModel(config)

    if is_finetuned:
        model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
            [checkpoint_path], arg_overrides={"data": "/".join(dict_path.split("/")[:-1])}
        )
    else:
        model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path])

    model = model[0].eval()

    recursively_load_weights(model, hf_wav2vec, is_finetuned)

    hf_wav2vec.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to fairseq checkpoint')
parser.add_argument('--dict_path', default=None, type=str, help='Path to dict of fine-tuned model')
parser.add_argument('--config_path', default=None, type=str, help='Path to hf config.json of model to convert')
parser.add_argument(
'--not_finetuned', action='store_true', help='Whether the model to convert is a fine-tuned model or not'
)
    args = parser.parse_args()
convert_hubert_checkpoint(
args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned
)
| 76
|
"""simple docstring"""
import warnings
from ...utils import logging
from .image_processing_owlvit import OwlViTImageProcessor
logger = logging.get_logger(__name__)


class OwlViTFeatureExtractor(OwlViTImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class OwlViTFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
            " use OwlViTImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
| 238
| 0
|
import torch
from transformers import AutoModel
class FSNERModel(torch.nn.Module):
    def __init__(self, pretrained_model_name_or_path="sayef/fsner-bert-base-uncased"):
        super(FSNERModel, self).__init__()

        self.bert = AutoModel.from_pretrained(pretrained_model_name_or_path, return_dict=True)
        self.cos = torch.nn.CosineSimilarity(3, 1e-08)
        self.softmax = torch.nn.Softmax(dim=1)

    def BERT(self, **inputs):
        return self.bert(**inputs).last_hidden_state

    def VectorSum(self, token_embeddings):
        return token_embeddings.sum(2, keepdim=True)

    def Atten(self, q_rep, S_rep, T=1):
        return self.softmax(T * self.cos(q_rep, S_rep))

    def forward(self, W_query, W_supports):
        support_sizes = W_supports["sizes"].tolist()
        start_token_id = W_supports["start_token_id"].item()
        end_token_id = W_supports["end_token_id"].item()

        del W_supports["sizes"]
        del W_supports["start_token_id"]
        del W_supports["end_token_id"]

        q = self.BERT(**W_query)
        S = self.BERT(**W_supports)

        p_starts = None
        p_ends = None

        start_token_masks = W_supports["input_ids"] == start_token_id
        end_token_masks = W_supports["input_ids"] == end_token_id

        for i, size in enumerate(support_sizes):
            if i == 0:
                s = 0
            else:
                s = support_sizes[i - 1]

            s_start = S[s : s + size][start_token_masks[s : s + size]]
            s_end = S[s : s + size][end_token_masks[s : s + size]]

            p_start = torch.matmul(q[i], s_start.T).sum(1).softmax(0)
            p_end = torch.matmul(q[i], s_end.T).sum(1).softmax(0)

            if p_starts is not None:
                p_starts = torch.vstack((p_starts, p_start))
                p_ends = torch.vstack((p_ends, p_end))
            else:
                p_starts = p_start
                p_ends = p_end

        return p_starts, p_ends
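
# Usage sketch (hypothetical tensors, illustration only): the forward pass
# above scores every query token against the support set's start/end tokens.
#
#   model = FSNERModel()
#   p_starts, p_ends = model(W_query=query_batch, W_supports=support_batch)
#   # p_starts[i, j]: probability that token j of query i begins an entity
#   # p_ends[i, j]:   probability that token j of query i ends an entity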
| 358
|
from typing import List, Optional, Tuple
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_herbert import HerbertTokenizer
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {'vocab_file': 'vocab.json', 'merges_file': 'merges.txt', 'tokenizer_file': 'tokenizer.json'}

PRETRAINED_VOCAB_FILES_MAP = {
    'vocab_file': {
        'allegro/herbert-base-cased': 'https://huggingface.co/allegro/herbert-base-cased/resolve/main/vocab.json'
    },
    'merges_file': {
        'allegro/herbert-base-cased': 'https://huggingface.co/allegro/herbert-base-cased/resolve/main/merges.txt'
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {'allegro/herbert-base-cased': 514}
PRETRAINED_INIT_CONFIGURATION = {}
class HerbertTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = HerbertTokenizer

    def __init__(
        self,
        vocab_file=None,
        merges_file=None,
        tokenizer_file=None,
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        sep_token="</s>",
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            merges_file,
            tokenizer_file=tokenizer_file,
            cls_token=cls_token,
            unk_token=unk_token,
            pad_token=pad_token,
            mask_token=mask_token,
            sep_token=sep_token,
            **kwargs,
        )

    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        if token_ids_1 is None:
            return cls + token_ids_0 + sep

        return cls + token_ids_0 + sep + token_ids_1 + sep

    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )

        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1]

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
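
# Usage sketch (illustration only): the methods above implement the
# single- and pair-sequence layouts <s> A </s> and <s> A </s> B </s>, e.g.:
#
#   tokenizer = HerbertTokenizerFast.from_pretrained("allegro/herbert-base-cased")
#   ids = tokenizer.build_inputs_with_special_tokens([10, 11], [20, 21])
#   # -> [cls_id, 10, 11, sep_id, 20, 21, sep_id]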
| 327
| 0
|
import logging
import os
from .state import PartialState
class MultiProcessAdapter(logging.LoggerAdapter):
    @staticmethod
    def _should_log(main_process_only):
        state = PartialState()
        return not main_process_only or (main_process_only and state.is_main_process)

    def log(self, level, msg, *args, **kwargs):
        if PartialState._shared_state == {}:
            raise RuntimeError(
                "You must initialize the accelerate state by calling either `PartialState()` or `Accelerator()` before using the logging utility."
            )
        main_process_only = kwargs.pop("main_process_only", True)
        in_order = kwargs.pop("in_order", False)

        if self.isEnabledFor(level):
            if self._should_log(main_process_only):
                msg, kwargs = self.process(msg, kwargs)
                self.logger.log(level, msg, *args, **kwargs)
            elif in_order:
                state = PartialState()
                for i in range(state.num_processes):
                    if i == state.process_index:
                        msg, kwargs = self.process(msg, kwargs)
                        self.logger.log(level, msg, *args, **kwargs)
                    state.wait_for_everyone()


def get_logger(name: str, log_level: str = None):
    if log_level is None:
        log_level = os.environ.get("ACCELERATE_LOG_LEVEL", None)
    logger = logging.getLogger(name)
    if log_level is not None:
        logger.setLevel(log_level.upper())
        logger.root.setLevel(log_level.upper())
    return MultiProcessAdapter(logger, {})
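
# Usage sketch (assuming a configured Accelerate environment): the adapter
# above routes records through the extra kwargs it pops in `log`:
#
#   logger = get_logger(__name__, log_level="INFO")
#   logger.info("printed once, on the main process")  # main_process_only=True by default
#   logger.info("printed on every process", main_process_only=False)
#   logger.info("printed by each rank in turn", main_process_only=False, in_order=True)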
| 92
|
from dataclasses import dataclass, field
from typing import Tuple
from ..utils import cached_property, is_tf_available, logging, requires_backends
from .benchmark_args_utils import BenchmarkArguments
if is_tf_available():
import tensorflow as tf
logger = logging.get_logger(__name__)


@dataclass
class TensorFlowBenchmarkArguments(BenchmarkArguments):
    deprecated_args = [
        "no_inference",
        "no_cuda",
        "no_tpu",
        "no_speed",
        "no_memory",
        "no_env_print",
        "no_multi_process",
    ]

    def __init__(self, **kwargs):
        """Remap deprecated ``no_*`` arguments onto their positive counterparts."""
        for deprecated_arg in self.deprecated_args:
            if deprecated_arg in kwargs:
                positive_arg = deprecated_arg[3:]
                kwargs[positive_arg] = not kwargs.pop(deprecated_arg)
                logger.warning(
                    f"{deprecated_arg} is deprecated. Please use --no-{positive_arg} or"
                    f" {positive_arg}={kwargs[positive_arg]}"
                )
        self.tpu_name = kwargs.pop("tpu_name", self.tpu_name)
        self.device_idx = kwargs.pop("device_idx", self.device_idx)
        self.eager_mode = kwargs.pop("eager_mode", self.eager_mode)
        self.use_xla = kwargs.pop("use_xla", self.use_xla)
        super().__init__(**kwargs)

    tpu_name: str = field(
        default=None,
        metadata={"help": "Name of TPU"},
    )
    device_idx: int = field(
        default=0,
        metadata={"help": "CPU / GPU device index. Defaults to 0."},
    )
    eager_mode: bool = field(default=False, metadata={"help": "Benchmark models in eager mode."})
    use_xla: bool = field(
        default=False,
        metadata={
            "help": "Benchmark models using XLA JIT compilation. Note that `eager_mode` has to be set to `False`."
        },
    )

    @cached_property
    def _setup_tpu(self) -> Tuple["tf.distribute.cluster_resolver.TPUClusterResolver"]:
        requires_backends(self, ["tf"])
        tpu = None
        if self.tpu:
            try:
                if self.tpu_name:
                    tpu = tf.distribute.cluster_resolver.TPUClusterResolver(self.tpu_name)
                else:
                    tpu = tf.distribute.cluster_resolver.TPUClusterResolver()
            except ValueError:
                tpu = None
        return tpu

    @cached_property
    def _setup_strategy(self) -> Tuple["tf.distribute.Strategy", "tf.distribute.cluster_resolver.TPUClusterResolver"]:
        requires_backends(self, ["tf"])
        if self.is_tpu:
            tf.config.experimental_connect_to_cluster(self._setup_tpu)
            tf.tpu.experimental.initialize_tpu_system(self._setup_tpu)
            strategy = tf.distribute.TPUStrategy(self._setup_tpu)
        else:
            # currently no multi gpu is allowed
            if self.is_gpu:
                # TODO: Currently only single GPU is supported
                tf.config.set_visible_devices(self.gpu_list[self.device_idx], "GPU")
                strategy = tf.distribute.OneDeviceStrategy(device=f"/gpu:{self.device_idx}")
            else:
                tf.config.set_visible_devices([], "GPU")  # disable GPU
                strategy = tf.distribute.OneDeviceStrategy(device=f"/cpu:{self.device_idx}")
        return strategy

    @property
    def is_tpu(self) -> bool:
        requires_backends(self, ["tf"])
        return self._setup_tpu is not None

    @property
    def strategy(self) -> "tf.distribute.Strategy":
        requires_backends(self, ["tf"])
        return self._setup_strategy

    @property
    def gpu_list(self):
        requires_backends(self, ["tf"])
        return tf.config.list_physical_devices("GPU")

    @property
    def n_gpu(self) -> int:
        requires_backends(self, ["tf"])
        if self.cuda:
            return len(self.gpu_list)
        return 0

    @property
    def is_gpu(self) -> bool:
        return self.n_gpu > 0
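# A minimal usage sketch (added for illustration, kept as a comment so the
# module stays import-safe; `models` and the other benchmark fields live on the
# BenchmarkArguments base class, so the exact kwargs here are assumptions):
#
#     args = TensorFlowBenchmarkArguments(models=["bert-base-uncased"], eager_mode=True)
#     with args.strategy.scope():
#         ...  # build and call the model to benchmark under the selected device strategy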
| 92
| 1
|
import inspect
import os
import unittest
import torch
import accelerate
from accelerate import Accelerator
from accelerate.test_utils import execute_subprocess_async, require_multi_gpu
from accelerate.utils import patch_environment
class MultiGPUTester(unittest.TestCase):
    def setUp(self):
        mod_file = inspect.getfile(accelerate.test_utils)
        self.test_file_path = os.path.sep.join(mod_file.split(os.path.sep)[:-1] + ["scripts", "test_script.py"])
        self.data_loop_file_path = os.path.sep.join(
            mod_file.split(os.path.sep)[:-1] + ["scripts", "test_distributed_data_loop.py"]
        )
        self.operation_file_path = os.path.sep.join(mod_file.split(os.path.sep)[:-1] + ["scripts", "test_ops.py"])

    @require_multi_gpu
    def test_multi_gpu(self):
        print(f"Found {torch.cuda.device_count()} devices.")
        cmd = ["torchrun", f"--nproc_per_node={torch.cuda.device_count()}", self.test_file_path]
        with patch_environment(omp_num_threads=1):
            execute_subprocess_async(cmd, env=os.environ.copy())

    @require_multi_gpu
    def test_multi_gpu_ops(self):
        print(f"Found {torch.cuda.device_count()} devices.")
        cmd = ["torchrun", f"--nproc_per_node={torch.cuda.device_count()}", self.operation_file_path]
        print(f"Command: {cmd}")
        with patch_environment(omp_num_threads=1):
            execute_subprocess_async(cmd, env=os.environ.copy())

    @require_multi_gpu
    def test_pad_across_processes(self):
        cmd = ["torchrun", f"--nproc_per_node={torch.cuda.device_count()}", inspect.getfile(self.__class__)]
        with patch_environment(omp_num_threads=1):
            execute_subprocess_async(cmd, env=os.environ.copy())

    @require_multi_gpu
    def test_distributed_data_loop(self):
        print(f"Found {torch.cuda.device_count()} devices, using 2 devices only")
        cmd = ["torchrun", f"--nproc_per_node={torch.cuda.device_count()}", self.data_loop_file_path]
        with patch_environment(omp_num_threads=1, cuda_visible_devices="0,1"):
            execute_subprocess_async(cmd, env=os.environ.copy())


if __name__ == "__main__":
    accelerator = Accelerator()
    shape = (accelerator.state.process_index + 2, 10)
    tensor = torch.randint(0, 10, shape).to(accelerator.device)
    error_msg = ""
    tensor1 = accelerator.pad_across_processes(tensor)
    if tensor1.shape[0] != accelerator.state.num_processes + 1:
        error_msg += f"Found shape {tensor1.shape} but should have {accelerator.state.num_processes + 1} at dim 0."
    if not torch.equal(tensor1[: accelerator.state.process_index + 2], tensor):
        error_msg += "Tensors have different values."
    if not torch.all(tensor1[accelerator.state.process_index + 2 :] == 0):
        error_msg += "Padding was not done with the right value (0)."
    tensor2 = accelerator.pad_across_processes(tensor, pad_first=True)
    if tensor2.shape[0] != accelerator.state.num_processes + 1:
        error_msg += f"Found shape {tensor2.shape} but should have {accelerator.state.num_processes + 1} at dim 0."
    index = accelerator.state.num_processes - accelerator.state.process_index - 1
    if not torch.equal(tensor2[index:], tensor):
        error_msg += "Tensors have different values."
    if not torch.all(tensor2[:index] == 0):
        error_msg += "Padding was not done with the right value (0)."
    # Raise error at the end to make sure we don't stop at the first failure.
    if len(error_msg) > 0:
        raise ValueError(error_msg)
| 194
|
def count_inversions_bf(arr):
    """Count inversions by brute force in O(n^2)."""
    num_inversions = 0
    n = len(arr)
    for i in range(n - 1):
        for j in range(i + 1, n):
            if arr[i] > arr[j]:
                num_inversions += 1
    return num_inversions


def count_inversions_recursive(arr):
    """Count inversions with a merge-sort style divide and conquer in O(n log n)."""
    if len(arr) <= 1:
        return arr, 0
    mid = len(arr) // 2
    p = arr[0:mid]
    q = arr[mid:]
    sorted_p, inversion_p = count_inversions_recursive(p)
    sorted_q, inversions_q = count_inversions_recursive(q)
    sorted_arr, cross_inversions = _count_cross_inversions(sorted_p, sorted_q)
    num_inversions = inversion_p + inversions_q + cross_inversions
    return sorted_arr, num_inversions


def _count_cross_inversions(p, q):
    """Merge two sorted arrays and count inversions that cross the split point."""
    r = []
    i = j = num_inversion = 0
    while i < len(p) and j < len(q):
        if p[i] > q[j]:
            # if p[i] > q[j], then p[k] > q[j] for all i < k <= len(p).
            # These are all inversions. The claim emerges from the
            # property that p is sorted.
            num_inversion += len(p) - i
            r.append(q[j])
            j += 1
        else:
            r.append(p[i])
            i += 1
    if i < len(p):
        r.extend(p[i:])
    else:
        r.extend(q[j:])
    return r, num_inversion


def main():
    arr_1 = [10, 2, 1, 5, 5, 2, 11]
    # this arr has 8 inversions:
    # (10, 2), (10, 1), (10, 5), (10, 5), (10, 2), (2, 1), (5, 2), (5, 2)
    num_inversions_bf = count_inversions_bf(arr_1)
    _, num_inversions_recursive = count_inversions_recursive(arr_1)
    assert num_inversions_bf == num_inversions_recursive == 8
    print("number of inversions = ", num_inversions_bf)
    # testing an array with zero inversion (a sorted arr_1)
    arr_1.sort()
    num_inversions_bf = count_inversions_bf(arr_1)
    _, num_inversions_recursive = count_inversions_recursive(arr_1)
    assert num_inversions_bf == num_inversions_recursive == 0
    print("number of inversions = ", num_inversions_bf)
    # an empty list should also have zero inversions
    arr_1 = []
    num_inversions_bf = count_inversions_bf(arr_1)
    _, num_inversions_recursive = count_inversions_recursive(arr_1)
    assert num_inversions_bf == num_inversions_recursive == 0
    print("number of inversions = ", num_inversions_bf)
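# A quick sanity check (added for illustration; uses the function names defined
# above): the divide-and-conquer counter also returns the sorted array, so it
# doubles as a merge sort.
demo_sorted, demo_inversions = count_inversions_recursive([3, 1, 2])
assert demo_sorted == [1, 2, 3]
assert demo_inversions == 2  # the pairs (3, 1) and (3, 2)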
if __name__ == "__main__":
    main()
| 194
| 1
|
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowercase__ : Optional[int] = logging.get_logger(__name__)
lowercase__ : str = {
'transfo-xl-wt103': 'https://huggingface.co/transfo-xl-wt103/resolve/main/config.json',
}
class __lowerCAmelCase ( snake_case__ ):
"""simple docstring"""
_snake_case : Optional[Any] = "transfo-xl"
_snake_case : Optional[Any] = ["mems"]
_snake_case : List[str] = {
"n_token": "vocab_size",
"hidden_size": "d_model",
"num_attention_heads": "n_head",
"num_hidden_layers": "n_layer",
}
def __init__( self : Union[str, Any] , lowerCAmelCase__ : Tuple=267735 , lowerCAmelCase__ : List[str]=[20000, 40000, 200000] , lowerCAmelCase__ : Dict=1024 , lowerCAmelCase__ : List[str]=1024 , lowerCAmelCase__ : int=16 , lowerCAmelCase__ : Optional[int]=64 , lowerCAmelCase__ : int=4096 , lowerCAmelCase__ : Tuple=4 , lowerCAmelCase__ : Union[str, Any]=False , lowerCAmelCase__ : List[Any]=18 , lowerCAmelCase__ : List[Any]=1600 , lowerCAmelCase__ : Tuple=1000 , lowerCAmelCase__ : int=True , lowerCAmelCase__ : Optional[int]=True , lowerCAmelCase__ : Optional[int]=0 , lowerCAmelCase__ : List[Any]=-1 , lowerCAmelCase__ : List[Any]=True , lowerCAmelCase__ : Any=0.1 , lowerCAmelCase__ : Tuple=0.0 , lowerCAmelCase__ : List[str]=True , lowerCAmelCase__ : List[str]="normal" , lowerCAmelCase__ : Optional[int]=0.01 , lowerCAmelCase__ : int=0.01 , lowerCAmelCase__ : Union[str, Any]=0.02 , lowerCAmelCase__ : str=1e-5 , lowerCAmelCase__ : Dict=0 , **lowerCAmelCase__ : Optional[int] , ) -> str:
'''simple docstring'''
_UpperCamelCase = vocab_size
_UpperCamelCase = []
self.cutoffs.extend(snake_case__ )
if proj_share_all_but_first:
_UpperCamelCase = [False] + [True] * len(self.cutoffs )
else:
_UpperCamelCase = [False] + [False] * len(self.cutoffs )
_UpperCamelCase = d_model
_UpperCamelCase = d_embed
_UpperCamelCase = d_head
_UpperCamelCase = d_inner
_UpperCamelCase = div_val
_UpperCamelCase = pre_lnorm
_UpperCamelCase = n_layer
_UpperCamelCase = n_head
_UpperCamelCase = mem_len
_UpperCamelCase = same_length
_UpperCamelCase = attn_type
_UpperCamelCase = clamp_len
_UpperCamelCase = sample_softmax
_UpperCamelCase = adaptive
_UpperCamelCase = dropout
_UpperCamelCase = dropatt
_UpperCamelCase = untie_r
_UpperCamelCase = init
_UpperCamelCase = init_range
_UpperCamelCase = proj_init_std
_UpperCamelCase = init_std
_UpperCamelCase = layer_norm_epsilon
super().__init__(eos_token_id=snake_case__ , **snake_case__ )
@property
def snake_case__ ( self : List[str] ) -> Optional[int]:
'''simple docstring'''
logger.info(f"""The model {self.model_type} is one of the few models that has no sequence length limit.""" )
return -1
@max_position_embeddings.setter
def snake_case__ ( self : Optional[Any] , lowerCAmelCase__ : Union[str, Any] ) -> Tuple:
'''simple docstring'''
raise NotImplementedError(
f"""The model {self.model_type} is one of the few models that has no sequence length limit.""" )
| 324
|
"""simple docstring"""
deps = {
'Pillow': 'Pillow<10.0.0',
'accelerate': 'accelerate>=0.20.3',
'av': 'av==9.2.0',
'beautifulsoup4': 'beautifulsoup4',
'black': 'black~=23.1',
'codecarbon': 'codecarbon==1.2.0',
'cookiecutter': 'cookiecutter==1.7.3',
'dataclasses': 'dataclasses',
'datasets': 'datasets!=2.5.0',
'decord': 'decord==0.6.0',
'deepspeed': 'deepspeed>=0.9.3',
'diffusers': 'diffusers',
'dill': 'dill<0.3.5',
'evaluate': 'evaluate>=0.2.0',
'fairscale': 'fairscale>0.3',
'faiss-cpu': 'faiss-cpu',
'fastapi': 'fastapi',
'filelock': 'filelock',
'flax': 'flax>=0.4.1,<=0.7.0',
'ftfy': 'ftfy',
'fugashi': 'fugashi>=1.0',
'GitPython': 'GitPython<3.1.19',
'hf-doc-builder': 'hf-doc-builder>=0.3.0',
'huggingface-hub': 'huggingface-hub>=0.14.1,<1.0',
'importlib_metadata': 'importlib_metadata',
'ipadic': 'ipadic>=1.0.0,<2.0',
'isort': 'isort>=5.5.4',
'jax': 'jax>=0.2.8,!=0.3.2,<=0.4.13',
'jaxlib': 'jaxlib>=0.1.65,<=0.4.13',
'jieba': 'jieba',
'kenlm': 'kenlm',
'keras-nlp': 'keras-nlp>=0.3.1',
'librosa': 'librosa',
'nltk': 'nltk',
'natten': 'natten>=0.14.6',
'numpy': 'numpy>=1.17',
'onnxconverter-common': 'onnxconverter-common',
'onnxruntime-tools': 'onnxruntime-tools>=1.4.2',
'onnxruntime': 'onnxruntime>=1.4.0',
'opencv-python': 'opencv-python',
'optuna': 'optuna',
'optax': 'optax>=0.0.8,<=0.1.4',
'packaging': 'packaging>=20.0',
'parameterized': 'parameterized',
'phonemizer': 'phonemizer',
'protobuf': 'protobuf',
'psutil': 'psutil',
'pyyaml': 'pyyaml>=5.1',
'pydantic': 'pydantic<2',
'pytest': 'pytest>=7.2.0',
'pytest-timeout': 'pytest-timeout',
'pytest-xdist': 'pytest-xdist',
'python': 'python>=3.8.0',
'ray[tune]': 'ray[tune]',
'regex': 'regex!=2019.12.17',
'requests': 'requests',
'rhoknp': 'rhoknp>=1.1.0,<1.3.1',
'rjieba': 'rjieba',
'rouge-score': 'rouge-score!=0.0.7,!=0.0.8,!=0.1,!=0.1.1',
'ruff': 'ruff>=0.0.241,<=0.0.259',
'sacrebleu': 'sacrebleu>=1.4.12,<2.0.0',
'sacremoses': 'sacremoses',
'safetensors': 'safetensors>=0.3.1',
'sagemaker': 'sagemaker>=2.31.0',
'scikit-learn': 'scikit-learn',
'sentencepiece': 'sentencepiece>=0.1.91,!=0.1.92',
'sigopt': 'sigopt',
'starlette': 'starlette',
'sudachipy': 'sudachipy>=0.6.6',
'sudachidict_core': 'sudachidict_core>=20220729',
'tensorflow-cpu': 'tensorflow-cpu>=2.6,<2.14',
'tensorflow': 'tensorflow>=2.6,<2.14',
'tensorflow-text': 'tensorflow-text<2.14',
'tf2onnx': 'tf2onnx',
'timeout-decorator': 'timeout-decorator',
'timm': 'timm',
'tokenizers': 'tokenizers>=0.11.1,!=0.11.3,<0.14',
'torch': 'torch>=1.9,!=1.12.0',
'torchaudio': 'torchaudio',
'torchvision': 'torchvision',
'pyctcdecode': 'pyctcdecode>=0.4.0',
'tqdm': 'tqdm>=4.27',
'unidic': 'unidic>=1.0.2',
'unidic_lite': 'unidic_lite>=1.0.7',
'urllib3': 'urllib3<2.0.0',
'uvicorn': 'uvicorn',
}
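# Illustration (not part of the original table, kept as a comment): entries are
# keyed by bare package name, so a pinned version spec can be looked up directly.
#
#     assert deps["numpy"] == "numpy>=1.17"
#     assert deps["tqdm"] == "tqdm>=4.27"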
| 144
| 0
|
"""simple docstring"""
import os
from argparse import ArgumentParser
from typing import List
import torch.utils.data
from datasets import Dataset, IterableDataset
from datasets.distributed import split_dataset_by_node
NUM_SHARDS = 4
NUM_ITEMS_PER_SHARD = 3


class FailedTestError(RuntimeError):
    pass


def gen(shards: List[str]):
    for shard in shards:
        for i in range(NUM_ITEMS_PER_SHARD):
            yield {"i": i, "shard": shard}


def main():
    rank = int(os.environ["RANK"])
    world_size = int(os.environ["WORLD_SIZE"])
    parser = ArgumentParser()
    parser.add_argument("--streaming", type=bool)
    parser.add_argument("--local_rank", type=int)
    parser.add_argument("--num_workers", type=int, default=0)
    args = parser.parse_args()
    streaming = args.streaming
    num_workers = args.num_workers
    gen_kwargs = {"shards": [f"shard_{shard_idx}" for shard_idx in range(NUM_SHARDS)]}
    ds = IterableDataset.from_generator(gen, gen_kwargs=gen_kwargs)
    if not streaming:
        ds = Dataset.from_list(list(ds))
    ds = split_dataset_by_node(ds, rank=rank, world_size=world_size)
    dataloader = torch.utils.data.DataLoader(ds, num_workers=num_workers)
    full_size = NUM_SHARDS * NUM_ITEMS_PER_SHARD
    expected_local_size = full_size // world_size
    expected_local_size += int(rank < (full_size % world_size))
    local_size = sum(1 for _ in dataloader)
    if local_size != expected_local_size:
        raise FailedTestError(f"local_size {local_size} != expected_local_size {expected_local_size}")
if __name__ == "__main__":
    main()
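# A minimal single-process sketch of what split_dataset_by_node does (added for
# illustration, kept as a comment so it does not run under torchrun; it reuses
# the imports at the top of this script): each rank sees a disjoint,
# near-equal slice of the dataset.
#
#     demo_ds = Dataset.from_dict({"i": list(range(10))})
#     for demo_rank in range(2):
#         demo_shard = split_dataset_by_node(demo_ds, rank=demo_rank, world_size=2)
#         print(demo_rank, len(demo_shard))  # each rank gets 5 of the 10 examples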
| 32
|
"""simple docstring"""
from typing import List, Union
from ..utils import (
add_end_docstrings,
is_tf_available,
is_torch_available,
is_vision_available,
logging,
requires_backends,
)
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_tf_available():
import tensorflow as tf
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING
from ..tf_utils import stable_softmax
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING
logger = logging.get_logger(__name__)


@add_end_docstrings(PIPELINE_INIT_ARGS)
class ImageClassificationPipeline(Pipeline):
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        requires_backends(self, "vision")
        self.check_model_type(
            TF_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING
            if self.framework == "tf"
            else MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING
        )

    def _sanitize_parameters(self, top_k=None):
        postprocess_params = {}
        if top_k is not None:
            postprocess_params["top_k"] = top_k
        return {}, {}, postprocess_params

    def __call__(self, images: Union[str, List[str], "Image.Image", List["Image.Image"]], **kwargs):
        return super().__call__(images, **kwargs)

    def preprocess(self, image):
        image = load_image(image)
        model_inputs = self.image_processor(images=image, return_tensors=self.framework)
        return model_inputs

    def _forward(self, model_inputs):
        model_outputs = self.model(**model_inputs)
        return model_outputs

    def postprocess(self, model_outputs, top_k=5):
        if top_k > self.model.config.num_labels:
            top_k = self.model.config.num_labels
        if self.framework == "pt":
            probs = model_outputs.logits.softmax(-1)[0]
            scores, ids = probs.topk(top_k)
        elif self.framework == "tf":
            probs = stable_softmax(model_outputs.logits, axis=-1)[0]
            topk = tf.math.top_k(probs, k=top_k)
            scores, ids = topk.values.numpy(), topk.indices.numpy()
        else:
            raise ValueError(f"Unsupported framework: {self.framework}")
        scores = scores.tolist()
        ids = ids.tolist()
        return [{"score": score, "label": self.model.config.id2label[_id]} for score, _id in zip(scores, ids)]
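# A minimal usage sketch (added for illustration, kept as a comment so the
# module stays import-safe; it downloads a default checkpoint and the image
# path is a placeholder you would replace):
#
#     from transformers import pipeline
#
#     classifier = pipeline("image-classification")
#     for prediction in classifier("cat.jpg", top_k=3):
#         print(prediction["label"], round(prediction["score"], 4))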
| 32
| 1
|
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
EulerAncestralDiscreteScheduler,
LMSDiscreteScheduler,
PNDMScheduler,
StableDiffusionInstructPixaPixPipeline,
UNetaDConditionModel,
)
from diffusers.image_processor import VaeImageProcessor
from diffusers.utils import floats_tensor, load_image, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import (
IMAGE_TO_IMAGE_IMAGE_PARAMS,
TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class StableDiffusionInstructPix2PixPipelineFastTests(
    PipelineLatentTesterMixin, PipelineKarrasSchedulerTesterMixin, PipelineTesterMixin, unittest.TestCase
):
    pipeline_class = StableDiffusionInstructPixaPixPipeline
    params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {"height", "width", "cross_attention_kwargs"}
    batch_params = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS
    image_params = IMAGE_TO_IMAGE_IMAGE_PARAMS
    image_latents_params = IMAGE_TO_IMAGE_IMAGE_PARAMS
def snake_case ( self : Union[str, Any] ):
torch.manual_seed(0 )
lowercase__ : Dict = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=8 , out_channels=4 , down_block_types=("DownBlock2D", "CrossAttnDownBlock2D") , up_block_types=("CrossAttnUpBlock2D", "UpBlock2D") , cross_attention_dim=32 , )
lowercase__ : Optional[Any] = PNDMScheduler(skip_prk_steps=SCREAMING_SNAKE_CASE )
torch.manual_seed(0 )
lowercase__ : Union[str, Any] = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"] , up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"] , latent_channels=4 , )
torch.manual_seed(0 )
lowercase__ : str = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-0_5 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_000 , )
lowercase__ : List[str] = CLIPTextModel(SCREAMING_SNAKE_CASE )
lowercase__ : str = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" )
lowercase__ : Union[str, Any] = {
"unet": unet,
"scheduler": scheduler,
"vae": vae,
"text_encoder": text_encoder,
"tokenizer": tokenizer,
"safety_checker": None,
"feature_extractor": None,
}
return components
def snake_case ( self : Tuple , SCREAMING_SNAKE_CASE : Optional[int] , SCREAMING_SNAKE_CASE : Optional[Any]=0 ):
lowercase__ : Dict = floats_tensor((1, 3, 32, 32) , rng=random.Random(SCREAMING_SNAKE_CASE ) ).to(SCREAMING_SNAKE_CASE )
lowercase__ : List[Any] = image.cpu().permute(0 , 2 , 3 , 1 )[0]
lowercase__ : Any = Image.fromarray(np.uinta(SCREAMING_SNAKE_CASE ) ).convert("RGB" )
if str(SCREAMING_SNAKE_CASE ).startswith("mps" ):
lowercase__ : Any = torch.manual_seed(SCREAMING_SNAKE_CASE )
else:
lowercase__ : Optional[Any] = torch.Generator(device=SCREAMING_SNAKE_CASE ).manual_seed(SCREAMING_SNAKE_CASE )
lowercase__ : Optional[Any] = {
"prompt": "A painting of a squirrel eating a burger",
"image": image,
"generator": generator,
"num_inference_steps": 2,
"guidance_scale": 6.0,
"image_guidance_scale": 1,
"output_type": "numpy",
}
return inputs
def snake_case ( self : Dict ):
lowercase__ : List[str] = "cpu" # ensure determinism for the device-dependent torch.Generator
lowercase__ : Optional[int] = self.get_dummy_components()
lowercase__ : Any = StableDiffusionInstructPixaPixPipeline(**SCREAMING_SNAKE_CASE )
lowercase__ : Dict = sd_pipe.to(SCREAMING_SNAKE_CASE )
sd_pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE )
lowercase__ : List[Any] = self.get_dummy_inputs(SCREAMING_SNAKE_CASE )
lowercase__ : Tuple = sd_pipe(**SCREAMING_SNAKE_CASE ).images
lowercase__ : Dict = image[0, -3:, -3:, -1]
assert image.shape == (1, 32, 32, 3)
lowercase__ : Optional[Any] = np.array([0.7_526, 0.3_750, 0.4_547, 0.6_117, 0.5_866, 0.5_016, 0.4_327, 0.5_642, 0.4_815] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3
def snake_case ( self : Optional[Any] ):
lowercase__ : Optional[Any] = "cpu" # ensure determinism for the device-dependent torch.Generator
lowercase__ : str = self.get_dummy_components()
lowercase__ : Optional[Any] = StableDiffusionInstructPixaPixPipeline(**SCREAMING_SNAKE_CASE )
lowercase__ : List[str] = sd_pipe.to(SCREAMING_SNAKE_CASE )
sd_pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE )
lowercase__ : Union[str, Any] = self.get_dummy_inputs(SCREAMING_SNAKE_CASE )
lowercase__ : Union[str, Any] = "french fries"
lowercase__ : Optional[int] = sd_pipe(**SCREAMING_SNAKE_CASE , negative_prompt=SCREAMING_SNAKE_CASE )
lowercase__ : Dict = output.images
lowercase__ : List[Any] = image[0, -3:, -3:, -1]
assert image.shape == (1, 32, 32, 3)
lowercase__ : List[Any] = np.array([0.7_511, 0.3_642, 0.4_553, 0.6_236, 0.5_797, 0.5_013, 0.4_343, 0.5_611, 0.4_831] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3
def snake_case ( self : Dict ):
lowercase__ : int = "cpu" # ensure determinism for the device-dependent torch.Generator
lowercase__ : List[Any] = self.get_dummy_components()
lowercase__ : Union[str, Any] = StableDiffusionInstructPixaPixPipeline(**SCREAMING_SNAKE_CASE )
lowercase__ : Union[str, Any] = sd_pipe.to(SCREAMING_SNAKE_CASE )
sd_pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE )
lowercase__ : int = self.get_dummy_inputs(SCREAMING_SNAKE_CASE )
lowercase__ : Optional[int] = [inputs["prompt"]] * 2
lowercase__ : Any = np.array(inputs["image"] ).astype(np.floataa ) / 255.0
lowercase__ : Any = torch.from_numpy(SCREAMING_SNAKE_CASE ).unsqueeze(0 ).to(SCREAMING_SNAKE_CASE )
lowercase__ : str = image / 2 + 0.5
lowercase__ : Any = image.permute(0 , 3 , 1 , 2 )
lowercase__ : Dict = image.repeat(2 , 1 , 1 , 1 )
lowercase__ : List[str] = sd_pipe(**SCREAMING_SNAKE_CASE ).images
lowercase__ : Tuple = image[-1, -3:, -3:, -1]
assert image.shape == (2, 32, 32, 3)
lowercase__ : Optional[int] = np.array([0.5_812, 0.5_748, 0.5_222, 0.5_908, 0.5_695, 0.7_174, 0.6_804, 0.5_523, 0.5_579] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3
def snake_case ( self : List[str] ):
lowercase__ : List[str] = "cpu" # ensure determinism for the device-dependent torch.Generator
lowercase__ : int = self.get_dummy_components()
lowercase__ : Union[str, Any] = EulerAncestralDiscreteScheduler(
beta_start=0.00_085 , beta_end=0.012 , beta_schedule="scaled_linear" )
lowercase__ : Union[str, Any] = StableDiffusionInstructPixaPixPipeline(**SCREAMING_SNAKE_CASE )
lowercase__ : int = sd_pipe.to(SCREAMING_SNAKE_CASE )
sd_pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE )
lowercase__ : int = self.get_dummy_inputs(SCREAMING_SNAKE_CASE )
lowercase__ : Dict = sd_pipe(**SCREAMING_SNAKE_CASE ).images
lowercase__ : Optional[Any] = image[0, -3:, -3:, -1]
lowercase__ : int = [round(SCREAMING_SNAKE_CASE , 4 ) for x in image_slice.flatten().tolist()]
print(",".join([str(SCREAMING_SNAKE_CASE ) for x in slice] ) )
assert image.shape == (1, 32, 32, 3)
lowercase__ : List[Any] = np.array([0.7_417, 0.3_842, 0.4_732, 0.5_776, 0.5_891, 0.5_139, 0.4_052, 0.5_673, 0.4_986] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3
def snake_case ( self : Tuple ):
super().test_inference_batch_single_identical(expected_max_diff=3E-3 )
def snake_case ( self : List[Any] ):
lowercase__ : Optional[Any] = self.get_dummy_components()
lowercase__ : str = StableDiffusionInstructPixaPixPipeline(**SCREAMING_SNAKE_CASE )
lowercase__ : List[Any] = VaeImageProcessor(do_resize=SCREAMING_SNAKE_CASE , do_normalize=SCREAMING_SNAKE_CASE )
lowercase__ : Optional[Any] = pipe.to(SCREAMING_SNAKE_CASE )
pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE )
lowercase__ : Any = pipe(**self.get_dummy_inputs_by_type(SCREAMING_SNAKE_CASE , input_image_type="pt" ) )[0]
lowercase__ : Optional[int] = components["vae"]
lowercase__ : List[str] = self.get_dummy_inputs_by_type(SCREAMING_SNAKE_CASE , input_image_type="pt" )
for image_param in self.image_latents_params:
if image_param in inputs.keys():
lowercase__ : Any = vae.encode(inputs[image_param] ).latent_dist.mode()
lowercase__ : int = pipe(**SCREAMING_SNAKE_CASE )[0]
lowercase__ : str = np.abs(out - out_latents_inputs ).max()
self.assertLess(SCREAMING_SNAKE_CASE , 1E-4 , "passing latents as image input generate different result from passing image" )
@slow
@require_torch_gpu
class StableDiffusionInstructPix2PixPipelineSlowTests(unittest.TestCase):
def snake_case ( self : Dict ):
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def snake_case ( self : int , SCREAMING_SNAKE_CASE : Any=0 ):
lowercase__ : Optional[int] = torch.manual_seed(SCREAMING_SNAKE_CASE )
lowercase__ : List[Any] = load_image(
"https://huggingface.co/datasets/diffusers/test-arrays/resolve/main/stable_diffusion_pix2pix/example.jpg" )
lowercase__ : Dict = {
"prompt": "turn him into a cyborg",
"image": image,
"generator": generator,
"num_inference_steps": 3,
"guidance_scale": 7.5,
"image_guidance_scale": 1.0,
"output_type": "numpy",
}
return inputs
def snake_case ( self : Any ):
lowercase__ : Union[str, Any] = StableDiffusionInstructPixaPixPipeline.from_pretrained(
"timbrooks/instruct-pix2pix" , safety_checker=SCREAMING_SNAKE_CASE )
pipe.to(SCREAMING_SNAKE_CASE )
pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE )
pipe.enable_attention_slicing()
lowercase__ : Tuple = self.get_inputs()
lowercase__ : Dict = pipe(**SCREAMING_SNAKE_CASE ).images
lowercase__ : Dict = image[0, -3:, -3:, -1].flatten()
assert image.shape == (1, 512, 512, 3)
lowercase__ : Union[str, Any] = np.array([0.5_902, 0.6_015, 0.6_027, 0.5_983, 0.6_092, 0.6_061, 0.5_765, 0.5_785, 0.5_555] )
assert np.abs(expected_slice - image_slice ).max() < 1E-3
def snake_case ( self : Optional[int] ):
lowercase__ : Optional[Any] = StableDiffusionInstructPixaPixPipeline.from_pretrained(
"timbrooks/instruct-pix2pix" , safety_checker=SCREAMING_SNAKE_CASE )
lowercase__ : List[Any] = LMSDiscreteScheduler.from_config(pipe.scheduler.config )
pipe.to(SCREAMING_SNAKE_CASE )
pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE )
pipe.enable_attention_slicing()
lowercase__ : int = self.get_inputs()
lowercase__ : Union[str, Any] = pipe(**SCREAMING_SNAKE_CASE ).images
lowercase__ : Tuple = image[0, -3:, -3:, -1].flatten()
assert image.shape == (1, 512, 512, 3)
lowercase__ : Optional[Any] = np.array([0.6_578, 0.6_817, 0.6_972, 0.6_761, 0.6_856, 0.6_916, 0.6_428, 0.6_516, 0.6_301] )
assert np.abs(expected_slice - image_slice ).max() < 1E-3
def snake_case ( self : Any ):
lowercase__ : Optional[int] = StableDiffusionInstructPixaPixPipeline.from_pretrained(
"timbrooks/instruct-pix2pix" , safety_checker=SCREAMING_SNAKE_CASE )
lowercase__ : str = DDIMScheduler.from_config(pipe.scheduler.config )
pipe.to(SCREAMING_SNAKE_CASE )
pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE )
pipe.enable_attention_slicing()
lowercase__ : Optional[Any] = self.get_inputs()
lowercase__ : Any = pipe(**SCREAMING_SNAKE_CASE ).images
lowercase__ : str = image[0, -3:, -3:, -1].flatten()
assert image.shape == (1, 512, 512, 3)
lowercase__ : Any = np.array([0.3_828, 0.3_834, 0.3_818, 0.3_792, 0.3_865, 0.3_752, 0.3_792, 0.3_847, 0.3_753] )
assert np.abs(expected_slice - image_slice ).max() < 1E-3
def snake_case ( self : str ):
lowercase__ : Dict = 0
def callback_fn(SCREAMING_SNAKE_CASE : int , SCREAMING_SNAKE_CASE : int , SCREAMING_SNAKE_CASE : torch.FloatTensor ) -> None:
lowercase__ : int = True
nonlocal number_of_steps
number_of_steps += 1
if step == 1:
lowercase__ : Dict = latents.detach().cpu().numpy()
assert latents.shape == (1, 4, 64, 64)
lowercase__ : Optional[Any] = latents[0, -3:, -3:, -1]
lowercase__ : int = np.array([-0.2_463, -0.4_644, -0.9_756, 1.5_176, 1.4_414, 0.7_866, 0.9_897, 0.8_521, 0.7_983] )
assert np.abs(latents_slice.flatten() - expected_slice ).max() < 5E-2
elif step == 2:
lowercase__ : Optional[Any] = latents.detach().cpu().numpy()
assert latents.shape == (1, 4, 64, 64)
lowercase__ : str = latents[0, -3:, -3:, -1]
lowercase__ : Tuple = np.array([-0.2_644, -0.4_626, -0.9_653, 1.5_176, 1.4_551, 0.7_686, 0.9_805, 0.8_452, 0.8_115] )
assert np.abs(latents_slice.flatten() - expected_slice ).max() < 5E-2
lowercase__ : Any = False
lowercase__ : List[Any] = StableDiffusionInstructPixaPixPipeline.from_pretrained(
"timbrooks/instruct-pix2pix" , safety_checker=SCREAMING_SNAKE_CASE , torch_dtype=torch.floataa )
lowercase__ : str = pipe.to(SCREAMING_SNAKE_CASE )
pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE )
pipe.enable_attention_slicing()
lowercase__ : Any = self.get_inputs()
pipe(**SCREAMING_SNAKE_CASE , callback=SCREAMING_SNAKE_CASE , callback_steps=1 )
assert callback_fn.has_been_called
assert number_of_steps == 3
def snake_case ( self : Union[str, Any] ):
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
lowercase__ : str = StableDiffusionInstructPixaPixPipeline.from_pretrained(
"timbrooks/instruct-pix2pix" , safety_checker=SCREAMING_SNAKE_CASE , torch_dtype=torch.floataa )
lowercase__ : Optional[int] = pipe.to(SCREAMING_SNAKE_CASE )
pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE )
pipe.enable_attention_slicing(1 )
pipe.enable_sequential_cpu_offload()
lowercase__ : Optional[Any] = self.get_inputs()
lowercase__ : List[str] = pipe(**SCREAMING_SNAKE_CASE )
lowercase__ : Any = torch.cuda.max_memory_allocated()
# make sure that less than 2.2 GB is allocated
assert mem_bytes < 2.2 * 10**9
def snake_case ( self : Optional[int] ):
lowercase__ : Optional[int] = self.get_inputs()
# resize to resolution that is divisible by 8 but not 16 or 32
lowercase__ : str = inputs["image"].resize((504, 504) )
lowercase__ : List[Any] = "timbrooks/instruct-pix2pix"
lowercase__ : Any = StableDiffusionInstructPixaPixPipeline.from_pretrained(
SCREAMING_SNAKE_CASE , safety_checker=SCREAMING_SNAKE_CASE , )
pipe.to(SCREAMING_SNAKE_CASE )
pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE )
pipe.enable_attention_slicing()
lowercase__ : List[str] = pipe(**SCREAMING_SNAKE_CASE )
lowercase__ : str = output.images[0]
lowercase__ : Tuple = image[255:258, 383:386, -1]
assert image.shape == (504, 504, 3)
lowercase__ : Optional[Any] = np.array([0.2_726, 0.2_529, 0.2_664, 0.2_655, 0.2_641, 0.2_642, 0.2_591, 0.2_649, 0.2_590] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 5E-3
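# A minimal end-to-end sketch of instruct-pix2pix inference (added for
# illustration, kept as a comment so the test module stays import-safe; the
# checkpoint name matches the one exercised by the slow tests above, and the
# pipeline class name follows the public diffusers API):
#
#     import torch
#     from diffusers import StableDiffusionInstructPix2PixPipeline
#     from diffusers.utils import load_image
#
#     pipe = StableDiffusionInstructPix2PixPipeline.from_pretrained(
#         "timbrooks/instruct-pix2pix", torch_dtype=torch.float16
#     ).to("cuda")
#     image = load_image(
#         "https://huggingface.co/datasets/diffusers/test-arrays/resolve/main/stable_diffusion_pix2pix/example.jpg"
#     )
#     edited = pipe("turn him into a cyborg", image=image, image_guidance_scale=1.0).images[0]
#     edited.save("cyborg.png")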
| 130
|
red = 0  # The first color of the flag.
white = 1  # The second color of the flag.
blue = 2  # The third color of the flag.
colors = (red, white, blue)


def dutch_national_flag_sort(sequence: list) -> list:
    """Sort a sequence of 0s, 1s and 2s in place in a single O(n) pass."""
    if not sequence:
        return []
    if len(sequence) == 1:
        return list(sequence)
    low = 0
    high = len(sequence) - 1
    mid = 0
    while mid <= high:
        if sequence[mid] == colors[0]:
            sequence[low], sequence[mid] = sequence[mid], sequence[low]
            low += 1
            mid += 1
        elif sequence[mid] == colors[1]:
            mid += 1
        elif sequence[mid] == colors[2]:
            sequence[mid], sequence[high] = sequence[high], sequence[mid]
            high -= 1
        else:
            msg = f"The elements inside the sequence must contains only {colors} values"
            raise ValueError(msg)
    return sequence
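# A quick check of the three-way partition above (added for illustration):
assert dutch_national_flag_sort([2, 0, 1, 0, 2, 1]) == [0, 0, 1, 1, 2, 2]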
if __name__ == "__main__":
    import doctest

    doctest.testmod()
    user_input = input("Enter numbers separated by commas:\n").strip()
    unsorted = [int(item.strip()) for item in user_input.split(",")]
    print(f"{dutch_national_flag_sort(unsorted)}")
| 130
| 1
|
"""simple docstring"""
from typing import List, Optional, Union
import numpy as np
import PIL.Image
from ...image_processing_utils import BaseImageProcessor, BatchFeature
from ...image_transforms import rescale, resize, to_channel_dimension_format
from ...image_utils import (
ChannelDimension,
PILImageResampling,
get_image_size,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, logging
logger = logging.get_logger(__name__)


class GLPNImageProcessor(BaseImageProcessor):
    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_resize: bool = True,
        size_divisor: int = 32,
        resample=PILImageResampling.BILINEAR,
        do_rescale: bool = True,
        **kwargs,
    ) -> None:
        self.do_resize = do_resize
        self.do_rescale = do_rescale
        self.size_divisor = size_divisor
        self.resample = resample
        super().__init__(**kwargs)

    def resize(
        self,
        image: np.ndarray,
        size_divisor: int,
        resample,
        data_format: Optional[ChannelDimension] = None,
        **kwargs,
    ) -> np.ndarray:
        height, width = get_image_size(image)
        # Rounds the height and width down to the closest multiple of size_divisor
        new_h = height // size_divisor * size_divisor
        new_w = width // size_divisor * size_divisor
        image = resize(image, (new_h, new_w), resample=resample, data_format=data_format, **kwargs)
        return image

    def rescale(
        self, image: np.ndarray, scale: float, data_format: Optional[ChannelDimension] = None, **kwargs
    ) -> np.ndarray:
        return rescale(image=image, scale=scale, data_format=data_format, **kwargs)

    def preprocess(
        self,
        images,
        do_resize: Optional[bool] = None,
        do_rescale: Optional[bool] = None,
        size_divisor: Optional[int] = None,
        resample=None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: ChannelDimension = ChannelDimension.FIRST,
        **kwargs,
    ) -> BatchFeature:
        do_resize = do_resize if do_resize is not None else self.do_resize
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        size_divisor = size_divisor if size_divisor is not None else self.size_divisor
        resample = resample if resample is not None else self.resample
        if do_resize and size_divisor is None:
            raise ValueError("size_divisor is required for resizing")
        images = make_list_of_images(images)
        if not valid_images(images):
            raise ValueError("Invalid image(s)")
        # All transformations expect numpy arrays.
        images = [to_numpy_array(img) for img in images]
        if do_resize:
            images = [self.resize(image, size_divisor=size_divisor, resample=resample) for image in images]
        if do_rescale:
            images = [self.rescale(image, scale=1 / 255) for image in images]
        images = [to_channel_dimension_format(image, data_format) for image in images]
        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
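# A minimal usage sketch (added for illustration, kept as a comment so the
# module stays import-safe; the class name follows the definition above and the
# dummy array stands in for a real image):
#
#     import numpy as np
#     processor = GLPNImageProcessor(size_divisor=32)
#     dummy = (np.random.rand(65, 97, 3) * 255).astype(np.uint8)
#     batch = processor(images=dummy, return_tensors="np")
#     print(batch["pixel_values"][0].shape)  # (3, 64, 96): rounded down to multiples of 32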
| 363
|
"""simple docstring"""
def solution() -> int:
    """Project Euler 40: the product of the digits d_1 * d_10 * d_100 * ... *
    d_1000000 of Champernowne's constant 0.123456789101112..."""
    constant = []
    i = 1
    while len(constant) < 1e6:
        constant.append(str(i))
        i += 1
    constant = "".join(constant)
    return (
        int(constant[0])
        * int(constant[9])
        * int(constant[99])
        * int(constant[999])
        * int(constant[9999])
        * int(constant[99999])
        * int(constant[999999])
    )
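# A smaller worked check of the same indexing (added for illustration): the
# first nine digits of the concatenation 1, 2, 3, ... are "123456789", so d_1
# is constant[0] and d_10 falls on the "1" of 10.
digits = "".join(str(n) for n in range(1, 16))
assert digits[:9] == "123456789"
assert digits[9] == "1"  # d_10, the first digit of "10"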
if __name__ == "__main__":
    print(solution())
| 291
| 0
|
test_graph_1 = {0: [2, 3], 1: [0], 2: [1], 3: [4], 4: []}
test_graph_2 = {0: [1, 2, 3], 1: [2], 2: [0], 3: [4], 4: [5], 5: [3]}


def topology_sort(graph: dict[int, list[int]], vert: int, visited: list[bool]) -> list[int]:
    """Use depth first search to order nodes by reverse finishing time."""
    visited[vert] = True
    order = []
    for neighbour in graph[vert]:
        if not visited[neighbour]:
            order += topology_sort(graph, neighbour, visited)
    order.append(vert)
    return order


def find_components(reversed_graph: dict[int, list[int]], vert: int, visited: list[bool]) -> list[int]:
    """Use depth first search to collect one strongly connected component of the
    reversed graph."""
    visited[vert] = True
    component = [vert]
    for neighbour in reversed_graph[vert]:
        if not visited[neighbour]:
            component += find_components(reversed_graph, neighbour, visited)
    return component


def strongly_connected_components(graph: dict[int, list[int]]) -> list[list[int]]:
    """Kosaraju's algorithm: sort vertices by DFS finish time, then explore the
    reversed graph in that order."""
    visited = len(graph) * [False]
    reversed_graph: dict[int, list[int]] = {vert: [] for vert in range(len(graph))}
    for vert, neighbours in graph.items():
        for neighbour in neighbours:
            reversed_graph[neighbour].append(vert)
    order = []
    for i, was_visited in enumerate(visited):
        if not was_visited:
            order += topology_sort(graph, i, visited)
    components_list = []
    visited = len(graph) * [False]
    for i in range(len(graph)):
        vert = order[len(graph) - i - 1]
        if not visited[vert]:
            component = find_components(reversed_graph, vert, visited)
            components_list.append(component)
    return components_list
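# A quick check against the module-level test graphs (added for illustration):
# in test_graph_1, vertices 0, 1 and 2 form a cycle, while 3 and 4 are their
# own single-vertex components.
assert strongly_connected_components(test_graph_1) == [[0, 1, 2], [3], [4]]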
| 95
|
'''simple docstring'''
import math
def main() -> None:
    message = input("Enter message: ")
    key = int(input(f"Enter key [2-{len(message) - 1}]: "))
    mode = input("Encryption/Decryption [e/d]: ")
    if mode.lower().startswith("e"):
        text = encrypt_message(key, message)
    elif mode.lower().startswith("d"):
        text = decrypt_message(key, message)
    # Append pipe symbol (vertical bar) to identify spaces at the end.
    print(f"Output:\n{text + '|'}")


def encrypt_message(key: int, message: str) -> str:
    """Write the message down the columns of a grid `key` columns wide, then
    read the cipher text off column by column."""
    cipher_text = [""] * key
    for col in range(key):
        pointer = col
        while pointer < len(message):
            cipher_text[col] += message[pointer]
            pointer += key
    return "".join(cipher_text)


def decrypt_message(key: int, message: str) -> str:
    """Invert encrypt_message by rebuilding the original grid."""
    num_cols = math.ceil(len(message) / key)
    num_rows = key
    num_shaded_boxes = (num_cols * num_rows) - len(message)
    plain_text = [""] * num_cols
    col = 0
    row = 0
    for symbol in message:
        plain_text[col] += symbol
        col += 1
        if (
            (col == num_cols)
            or (col == num_cols - 1)
            and (row >= num_rows - num_shaded_boxes)
        ):
            col = 0
            row += 1
    return "".join(plain_text)
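# A round-trip check (added for illustration): decrypting an encrypted message
# with the same key recovers the original text.
demo_key, demo_message = 6, "Common sense is not so common."
assert decrypt_message(demo_key, encrypt_message(demo_key, demo_message)) == demo_message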
if __name__ == "__main__":
    import doctest

    doctest.testmod()
    main()
| 1
| 0
|
def molarity_to_normality(nfactor: int, moles: float, volume: float) -> float:
    # normality = molarity * n-factor, where molarity = moles / volume
    return round(float(moles / volume) * nfactor)


def moles_to_pressure(volume: float, moles: float, temperature: float) -> float:
    # ideal gas law solved for pressure: P = nRT / V, with R = 0.0821 L*atm/(mol*K)
    return round(float((moles * 0.0821 * temperature) / (volume)))


def moles_to_volume(pressure: float, moles: float, temperature: float) -> float:
    # ideal gas law solved for volume: V = nRT / P
    return round(float((moles * 0.0821 * temperature) / (pressure)))


def pressure_and_volume_to_temperature(pressure: float, moles: float, volume: float) -> float:
    # ideal gas law solved for temperature: T = PV / (nR)
    return round(float((pressure * volume) / (0.0821 * moles)))
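# Worked examples (added for illustration; values chosen so the rounding in the
# helpers above is easy to follow):
assert molarity_to_normality(2, 4, 8) == 1  # (4 / 8) * 2
assert moles_to_pressure(0.82, 3, 300) == 90  # (3 * 0.0821 * 300) / 0.82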
if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 327
|
from scipy.stats import pearsonr
import datasets
_DESCRIPTION = '\nPearson correlation coefficient and p-value for testing non-correlation.\nThe Pearson correlation coefficient measures the linear relationship between two datasets. The calculation of the p-value relies on the assumption that each dataset is normally distributed. Like other correlation coefficients, this one varies between -1 and +1 with 0 implying no correlation. Correlations of -1 or +1 imply an exact linear relationship. Positive correlations imply that as x increases, so does y. Negative correlations imply that as x increases, y decreases.\nThe p-value roughly indicates the probability of an uncorrelated system producing datasets that have a Pearson correlation at least as extreme as the one computed from these datasets.\n'
_KWARGS_DESCRIPTION = '\nArgs:\n predictions (`list` of `int`): Predicted class labels, as returned by a model.\n references (`list` of `int`): Ground truth labels.\n return_pvalue (`boolean`): If `True`, returns the p-value, along with the correlation coefficient. If `False`, returns only the correlation coefficient. Defaults to `False`.\n\nReturns:\n pearsonr (`float`): Pearson correlation coefficient. Minimum possible value is -1. Maximum possible value is 1. Values of 1 and -1 indicate exact linear positive and negative relationships, respectively. A value of 0 implies no correlation.\n p-value (`float`): P-value, which roughly indicates the probability of an uncorrelated system producing datasets that have a Pearson correlation at least as extreme as the one computed from these datasets. Minimum possible value is 0. Maximum possible value is 1. Higher values indicate higher probabilities.\n\nExamples:\n\n Example 1-A simple example using only predictions and references.\n >>> pearsonr_metric = datasets.load_metric("pearsonr")\n >>> results = pearsonr_metric.compute(predictions=[10, 9, 2.5, 6, 4], references=[1, 2, 3, 4, 5])\n >>> print(round(results[\'pearsonr\'], 2))\n -0.74\n\n Example 2-The same as Example 1, but also returning the `p-value`.\n >>> pearsonr_metric = datasets.load_metric("pearsonr")\n >>> results = pearsonr_metric.compute(predictions=[10, 9, 2.5, 6, 4], references=[1, 2, 3, 4, 5], return_pvalue=True)\n >>> print(sorted(list(results.keys())))\n [\'p-value\', \'pearsonr\']\n >>> print(round(results[\'pearsonr\'], 2))\n -0.74\n >>> print(round(results[\'p-value\'], 2))\n 0.15\n'
_CITATION = '\n@article{2020SciPy-NMeth,\nauthor = {Virtanen, Pauli and Gommers, Ralf and Oliphant, Travis E. and\n Haberland, Matt and Reddy, Tyler and Cournapeau, David and\n Burovski, Evgeni and Peterson, Pearu and Weckesser, Warren and\n Bright, Jonathan and {van der Walt}, St{\'e}fan J. and\n Brett, Matthew and Wilson, Joshua and Millman, K. Jarrod and\n Mayorov, Nikolay and Nelson, Andrew R. J. and Jones, Eric and\n Kern, Robert and Larson, Eric and Carey, C J and\n Polat, Ilhan and Feng, Yu and Moore, Eric W. and\n {VanderPlas}, Jake and Laxalde, Denis and Perktold, Josef and\n Cimrman, Robert and Henriksen, Ian and Quintero, E. A. and\n Harris, Charles R. and Archibald, Anne M. and\n Ribeiro, Antonio H. and Pedregosa, Fabian and\n {van Mulbregt}, Paul and {SciPy 1.0 Contributors}},\ntitle = {{{SciPy} 1.0: Fundamental Algorithms for Scientific\n Computing in Python}},\njournal = {Nature Methods},\nyear = {2020},\nvolume = {17},\npages = {261--272},\nadsurl = {https://rdcu.be/b08Wh},\ndoi = {10.1038/s41592-019-0686-2},\n}\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class Pearsonr(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("float"),
                    "references": datasets.Value("float"),
                }
            ),
            reference_urls=["https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.pearsonr.html"],
        )

    def _compute(self, predictions, references, return_pvalue=False):
        if return_pvalue:
            results = pearsonr(references, predictions)
            return {"pearsonr": results[0], "p-value": results[1]}
        else:
            return {"pearsonr": float(pearsonr(references, predictions)[0])}
| 327
| 1
|
"""simple docstring"""
from typing import Optional
import torch
import torch.utils.checkpoint
from torch import Tensor, nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
from ...activations import ACTaFN
from ...modeling_outputs import (
BackboneOutput,
BaseModelOutputWithNoAttention,
BaseModelOutputWithPoolingAndNoAttention,
ImageClassifierOutputWithNoAttention,
)
from ...modeling_utils import PreTrainedModel
from ...utils import (
add_code_sample_docstrings,
add_start_docstrings,
add_start_docstrings_to_model_forward,
logging,
replace_return_docstrings,
)
from ...utils.backbone_utils import BackboneMixin
from .configuration_resnet import ResNetConfig
logger = logging.get_logger(__name__)

# General docstring
_CONFIG_FOR_DOC = "ResNetConfig"

# Base docstring
_CHECKPOINT_FOR_DOC = "microsoft/resnet-50"
_EXPECTED_OUTPUT_SHAPE = [1, 2048, 7, 7]

# Image classification docstring
_IMAGE_CLASS_CHECKPOINT = "microsoft/resnet-50"
_IMAGE_CLASS_EXPECTED_OUTPUT = "tiger cat"

RESNET_PRETRAINED_MODEL_ARCHIVE_LIST = [
    "microsoft/resnet-50",
    # See all resnet models at https://huggingface.co/models?filter=resnet
]
class ResNetConvLayer(nn.Module):
def __init__( self : Optional[Any] ,A_ : int ,A_ : int ,A_ : int = 3 ,A_ : int = 1 ,A_ : str = "relu" ) -> Tuple:
super().__init__()
A = nn.Convad(
A_ ,A_ ,kernel_size=A_ ,stride=A_ ,padding=kernel_size // 2 ,bias=A_ )
A = nn.BatchNormad(A_ )
A = ACTaFN[activation] if activation is not None else nn.Identity()
def _SCREAMING_SNAKE_CASE ( self : Tuple ,A_ : Tensor ) -> Tensor:
A = self.convolution(A_ )
A = self.normalization(A_ )
A = self.activation(A_ )
return hidden_state
class ResNetEmbeddings(nn.Module):
def __init__( self : List[Any] ,A_ : ResNetConfig ) -> Union[str, Any]:
super().__init__()
A = ResNetConvLayer(
config.num_channels ,config.embedding_size ,kernel_size=7 ,stride=2 ,activation=config.hidden_act )
A = nn.MaxPoolad(kernel_size=3 ,stride=2 ,padding=1 )
A = config.num_channels
def _SCREAMING_SNAKE_CASE ( self : Tuple ,A_ : Tensor ) -> Tensor:
A = pixel_values.shape[1]
if num_channels != self.num_channels:
raise ValueError(
'Make sure that the channel dimension of the pixel values match with the one set in the configuration.' )
A = self.embedder(A_ )
A = self.pooler(A_ )
return embedding
class ResNetShortCut(nn.Module):
def __init__( self : List[str] ,A_ : int ,A_ : int ,A_ : int = 2 ) -> Optional[int]:
super().__init__()
A = nn.Convad(A_ ,A_ ,kernel_size=1 ,stride=A_ ,bias=A_ )
A = nn.BatchNormad(A_ )
def _SCREAMING_SNAKE_CASE ( self : Optional[int] ,A_ : Tensor ) -> Tensor:
A = self.convolution(A_ )
A = self.normalization(A_ )
return hidden_state
class ResNetBasicLayer(nn.Module):
def __init__( self : str ,A_ : int ,A_ : int ,A_ : int = 1 ,A_ : str = "relu" ) -> Dict:
super().__init__()
A = in_channels != out_channels or stride != 1
A = (
ResNetShortCut(A_ ,A_ ,stride=A_ ) if should_apply_shortcut else nn.Identity()
)
A = nn.Sequential(
ResNetConvLayer(A_ ,A_ ,stride=A_ ) ,ResNetConvLayer(A_ ,A_ ,activation=A_ ) ,)
A = ACTaFN[activation]
def _SCREAMING_SNAKE_CASE ( self : Optional[int] ,A_ : Union[str, Any] ) -> Optional[Any]:
A = hidden_state
A = self.layer(A_ )
A = self.shortcut(A_ )
hidden_state += residual
A = self.activation(A_ )
return hidden_state
class ResNetBottleNeckLayer(nn.Module):
def __init__( self : Optional[Any] ,A_ : int ,A_ : int ,A_ : int = 1 ,A_ : str = "relu" ,A_ : int = 4 ) -> Dict:
super().__init__()
A = in_channels != out_channels or stride != 1
A = out_channels // reduction
A = (
ResNetShortCut(A_ ,A_ ,stride=A_ ) if should_apply_shortcut else nn.Identity()
)
A = nn.Sequential(
ResNetConvLayer(A_ ,A_ ,kernel_size=1 ) ,ResNetConvLayer(A_ ,A_ ,stride=A_ ) ,ResNetConvLayer(A_ ,A_ ,kernel_size=1 ,activation=A_ ) ,)
A = ACTaFN[activation]
def _SCREAMING_SNAKE_CASE ( self : str ,A_ : str ) -> List[Any]:
A = hidden_state
A = self.layer(A_ )
A = self.shortcut(A_ )
hidden_state += residual
A = self.activation(A_ )
return hidden_state
class ResNetStage(nn.Module):
def __init__( self : List[str] ,A_ : ResNetConfig ,A_ : int ,A_ : int ,A_ : int = 2 ,A_ : int = 2 ,) -> Tuple:
super().__init__()
A = ResNetBottleNeckLayer if config.layer_type == 'bottleneck' else ResNetBasicLayer
A = nn.Sequential(
# downsampling is done in the first layer with stride of 2
layer(A_ ,A_ ,stride=A_ ,activation=config.hidden_act ) ,*[layer(A_ ,A_ ,activation=config.hidden_act ) for _ in range(depth - 1 )] ,)
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ,A_ : Tensor ) -> Tensor:
A = input
for layer in self.layers:
A = layer(A_ )
return hidden_state
class ResNetEncoder(nn.Module):
def __init__( self : Optional[int] ,A_ : ResNetConfig ) -> Dict:
super().__init__()
A = nn.ModuleList([] )
# based on `downsample_in_first_stage` the first layer of the first stage may or may not downsample the input
self.stages.append(
ResNetStage(
A_ ,config.embedding_size ,config.hidden_sizes[0] ,stride=2 if config.downsample_in_first_stage else 1 ,depth=config.depths[0] ,) )
A = zip(config.hidden_sizes ,config.hidden_sizes[1:] )
for (in_channels, out_channels), depth in zip(A_ ,config.depths[1:] ):
self.stages.append(ResNetStage(A_ ,A_ ,A_ ,depth=A_ ) )
def _SCREAMING_SNAKE_CASE ( self : str ,A_ : Tensor ,A_ : bool = False ,A_ : bool = True ) -> BaseModelOutputWithNoAttention:
A = () if output_hidden_states else None
for stage_module in self.stages:
if output_hidden_states:
A = hidden_states + (hidden_state,)
A = stage_module(A_ )
if output_hidden_states:
A = hidden_states + (hidden_state,)
if not return_dict:
return tuple(v for v in [hidden_state, hidden_states] if v is not None )
return BaseModelOutputWithNoAttention(
last_hidden_state=A_ ,hidden_states=A_ ,)
class ResNetPreTrainedModel(PreTrainedModel):
    config_class = ResNetConfig
    base_model_prefix = "resnet"
    main_input_name = "pixel_values"
    supports_gradient_checkpointing = True
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ,A_ : Any ) -> Any:
if isinstance(A_ ,nn.Convad ):
nn.init.kaiming_normal_(module.weight ,mode='fan_out' ,nonlinearity='relu' )
elif isinstance(A_ ,(nn.BatchNormad, nn.GroupNorm) ):
nn.init.constant_(module.weight ,1 )
nn.init.constant_(module.bias ,0 )
def _SCREAMING_SNAKE_CASE ( self : List[Any] ,A_ : List[Any] ,A_ : Tuple=False ) -> str:
if isinstance(A_ ,A_ ):
A = value
RESNET_START_DOCSTRING = r'''
This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it
as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and
behavior.
Parameters:
config ([`ResNetConfig`]): Model configuration class with all the parameters of the model.
Initializing with a config file does not load the weights associated with the model, only the
configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
'''
RESNET_INPUTS_DOCSTRING = r'''
Args:
pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See
[`ConvNextImageProcessor.__call__`] for details.
output_hidden_states (`bool`, *optional*):
Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
more detail.
return_dict (`bool`, *optional*):
Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
'''
@add_start_docstrings(
    "The bare ResNet model outputting raw features without any specific head on top.",
    RESNET_START_DOCSTRING,
)
class ResNetModel(ResNetPreTrainedModel):
def __init__( self : List[Any] ,A_ : Dict ) -> Dict:
super().__init__(A_ )
A = config
A = ResNetEmbeddings(A_ )
A = ResNetEncoder(A_ )
A = nn.AdaptiveAvgPoolad((1, 1) )
# Initialize weights and apply final processing
self.post_init()
    @add_start_docstrings_to_model_forward(RESNET_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
        checkpoint=_CHECKPOINT_FOR_DOC,
        output_type=BaseModelOutputWithPoolingAndNoAttention,
        config_class=_CONFIG_FOR_DOC,
        modality="vision",
        expected_output=_EXPECTED_OUTPUT_SHAPE,
    )
def _SCREAMING_SNAKE_CASE ( self : Any ,A_ : Tensor ,A_ : Optional[bool] = None ,A_ : Optional[bool] = None ) -> BaseModelOutputWithPoolingAndNoAttention:
A = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
A = return_dict if return_dict is not None else self.config.use_return_dict
A = self.embedder(A_ )
A = self.encoder(
A_ ,output_hidden_states=A_ ,return_dict=A_ )
A = encoder_outputs[0]
A = self.pooler(A_ )
if not return_dict:
return (last_hidden_state, pooled_output) + encoder_outputs[1:]
return BaseModelOutputWithPoolingAndNoAttention(
last_hidden_state=A_ ,pooler_output=A_ ,hidden_states=encoder_outputs.hidden_states ,)
@add_start_docstrings(
    """
    ResNet Model with an image classification head on top (a linear layer on top of the pooled features), e.g. for
    ImageNet.
    """,
    RESNET_START_DOCSTRING,
)
class ResNetForImageClassification(ResNetPreTrainedModel):
def __init__( self : List[Any] ,A_ : Any ) -> List[Any]:
super().__init__(A_ )
A = config.num_labels
A = ResNetModel(A_ )
# classification head
A = nn.Sequential(
nn.Flatten() ,nn.Linear(config.hidden_sizes[-1] ,config.num_labels ) if config.num_labels > 0 else nn.Identity() ,)
# initialize weights and apply final processing
self.post_init()
    @add_start_docstrings_to_model_forward(RESNET_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
        checkpoint=_IMAGE_CLASS_CHECKPOINT,
        output_type=ImageClassifierOutputWithNoAttention,
        config_class=_CONFIG_FOR_DOC,
        expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT,
    )
def _SCREAMING_SNAKE_CASE ( self : List[str] ,A_ : Optional[torch.FloatTensor] = None ,A_ : Optional[torch.LongTensor] = None ,A_ : Optional[bool] = None ,A_ : Optional[bool] = None ,) -> ImageClassifierOutputWithNoAttention:
A = return_dict if return_dict is not None else self.config.use_return_dict
A = self.resnet(A_ ,output_hidden_states=A_ ,return_dict=A_ )
A = outputs.pooler_output if return_dict else outputs[1]
A = self.classifier(A_ )
A = None
if labels is not None:
if self.config.problem_type is None:
if self.num_labels == 1:
A = 'regression'
elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
A = 'single_label_classification'
else:
A = 'multi_label_classification'
if self.config.problem_type == "regression":
A = MSELoss()
if self.num_labels == 1:
A = loss_fct(logits.squeeze() ,labels.squeeze() )
else:
A = loss_fct(A_ ,A_ )
elif self.config.problem_type == "single_label_classification":
A = CrossEntropyLoss()
A = loss_fct(logits.view(-1 ,self.num_labels ) ,labels.view(-1 ) )
elif self.config.problem_type == "multi_label_classification":
A = BCEWithLogitsLoss()
A = loss_fct(A_ ,A_ )
if not return_dict:
A = (logits,) + outputs[2:]
return (loss,) + output if loss is not None else output
return ImageClassifierOutputWithNoAttention(loss=A_ ,logits=A_ ,hidden_states=outputs.hidden_states )
@add_start_docstrings(
    """
    ResNet backbone, to be used with frameworks like DETR and MaskFormer.
    """,
    RESNET_START_DOCSTRING,
)
class ResNetBackbone(ResNetPreTrainedModel, BackboneMixin):
def __init__( self : int ,A_ : Union[str, Any] ) -> Optional[int]:
super().__init__(A_ )
super()._init_backbone(A_ )
A = [config.embedding_size] + config.hidden_sizes
A = ResNetEmbeddings(A_ )
A = ResNetEncoder(A_ )
# initialize weights and apply final processing
self.post_init()
    @add_start_docstrings_to_model_forward(RESNET_INPUTS_DOCSTRING)
    @replace_return_docstrings(output_type=BackboneOutput, config_class=_CONFIG_FOR_DOC)
def _SCREAMING_SNAKE_CASE ( self : Tuple ,A_ : Tensor ,A_ : Optional[bool] = None ,A_ : Optional[bool] = None ) -> BackboneOutput:
A = return_dict if return_dict is not None else self.config.use_return_dict
A = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
A = self.embedder(A_ )
A = self.encoder(A_ ,output_hidden_states=A_ ,return_dict=A_ )
A = outputs.hidden_states
A = ()
for idx, stage in enumerate(self.stage_names ):
if stage in self.out_features:
feature_maps += (hidden_states[idx],)
if not return_dict:
A = (feature_maps,)
if output_hidden_states:
output += (outputs.hidden_states,)
return output
return BackboneOutput(
feature_maps=A_ ,hidden_states=outputs.hidden_states if output_hidden_states else None ,attentions=A_ ,)
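# A minimal usage sketch (added for illustration, kept as a comment so the
# module stays import-safe; the checkpoint name matches _CHECKPOINT_FOR_DOC
# above and the image path is a placeholder):
#
#     from transformers import AutoImageProcessor, ResNetModel
#     from PIL import Image
#
#     processor = AutoImageProcessor.from_pretrained("microsoft/resnet-50")
#     model = ResNetModel.from_pretrained("microsoft/resnet-50")
#     inputs = processor(images=Image.open("cat.jpg"), return_tensors="pt")
#     outputs = model(**inputs)
#     print(outputs.last_hidden_state.shape)  # torch.Size([1, 2048, 7, 7])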
| 74
|
'''simple docstring'''
import argparse
import torch
from transformers import FunnelBaseModel, FunnelConfig, FunnelModel, load_tf_weights_in_funnel
from transformers.utils import logging
logging.set_verbosity_info()
def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path, config_file, pytorch_dump_path, base_model):
    # Initialise PyTorch model
    config = FunnelConfig.from_json_file(config_file)
    print(f"Building PyTorch model from configuration: {config}")
    model = FunnelBaseModel(config) if base_model else FunnelModel(config)
    # Load weights from tf checkpoint
    load_tf_weights_in_funnel(model, config, tf_checkpoint_path)
    # Save pytorch-model
    print(f"Save PyTorch model to {pytorch_dump_path}")
    torch.save(model.state_dict(), pytorch_dump_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
)
parser.add_argument(
"--config_file",
default=None,
type=str,
required=True,
help="The config json file corresponding to the pre-trained model. \nThis specifies the model architecture.",
)
parser.add_argument(
"--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
parser.add_argument(
"--base_model", action="store_true", help="Whether you want just the base model (no decoder) or not."
)
    args = parser.parse_args()
convert_tf_checkpoint_to_pytorch(
args.tf_checkpoint_path, args.config_file, args.pytorch_dump_path, args.base_model
)
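# Example invocation (a sketch; the script and file names are illustrative):
#   python convert_funnel_original_tf_checkpoint_to_pytorch.py \
#       --tf_checkpoint_path ./model.ckpt \
#       --config_file ./config.json \
#       --pytorch_dump_path ./pytorch_model.bin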
def solution(power: int = 1000):
    '''simple docstring'''
    n = 2**power
    r = 0
    while n:
        r, n = r + n % 10, n // 10
    return r
if __name__ == "__main__":
print(solution(int(str(input()).strip())))
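# Worked example (illustrative): 2**15 = 32768 and 3 + 2 + 7 + 6 + 8 = 26.
assert solution(15) == 26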
def print_max_activities(start: list[int], finish: list[int]):
    '''simple docstring'''
    n = len(finish)
    print("The following activities are selected:")
    # The first activity is always selected
    i = 0
    print(i, end=",")
    # Consider rest of the activities
    for j in range(n):
        # If this activity has start time greater than
        # or equal to the finish time of previously
        # selected activity, then select it
        if start[j] >= finish[i]:
            print(j, end=",")
            i = j


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    start = [1, 3, 0, 5, 8, 5]
    finish = [2, 4, 6, 7, 9, 9]
    print_max_activities(start, finish)
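# Worked example (illustrative): with the start/finish lists above, the greedy
# scan keeps activity 0 and then 1, 3 and 4, so the script prints "0,1,3,4,".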
"""simple docstring"""
from __future__ import annotations
Path = list[tuple[int, int]]

grid = [
    [0, 0, 0, 0, 0, 0, 0],
    [0, 1, 0, 0, 0, 0, 0],  # 0 are free path whereas 1's are obstacles
    [0, 0, 0, 0, 0, 0, 0],
    [0, 0, 1, 0, 0, 0, 0],
    [1, 0, 1, 0, 0, 0, 0],
    [0, 0, 0, 0, 0, 0, 0],
    [0, 0, 0, 0, 1, 0, 0],
]

delta = ([-1, 0], [0, -1], [1, 0], [0, 1])  # up, left, down, right


class Node:
    """simple docstring"""

    def __init__(self, pos_x, pos_y, goal_x, goal_y, g_cost, parent):
        self.pos_x = pos_x
        self.pos_y = pos_y
        self.pos = (pos_y, pos_x)
        self.goal_x = goal_x
        self.goal_y = goal_y
        self.g_cost = g_cost
        self.parent = parent
        self.f_cost = self.calculate_heuristic()

    def calculate_heuristic(self):
        """The heuristic is the Manhattan distance to the goal."""
        dx = abs(self.pos_x - self.goal_x)
        dy = abs(self.pos_y - self.goal_y)
        return dx + dy

    def __lt__(self, other):
        return self.f_cost < other.f_cost


class GreedyBestFirst:
    """simple docstring"""

    def __init__(self, start, goal):
        self.start = Node(start[1], start[0], goal[1], goal[0], 0, None)
        self.target = Node(goal[1], goal[0], goal[1], goal[0], 99999, None)
        self.open_nodes = [self.start]
        self.closed_nodes = []
        self.reached = False

    def search(self):
        while self.open_nodes:
            # Open nodes are sorted using __lt__
            self.open_nodes.sort()
            current_node = self.open_nodes.pop(0)
            if current_node.pos == self.target.pos:
                self.reached = True
                return self.retrace_path(current_node)
            self.closed_nodes.append(current_node)
            successors = self.get_successors(current_node)
            for child_node in successors:
                if child_node in self.closed_nodes:
                    continue
                if child_node not in self.open_nodes:
                    self.open_nodes.append(child_node)
                else:
                    # retrieve the best current path
                    better_node = self.open_nodes.pop(self.open_nodes.index(child_node))
                    if child_node.g_cost < better_node.g_cost:
                        self.open_nodes.append(child_node)
                    else:
                        self.open_nodes.append(better_node)
        if not self.reached:
            return [self.start.pos]
        return None

    def get_successors(self, parent):
        successors = []
        for action in delta:
            pos_x = parent.pos_x + action[1]
            pos_y = parent.pos_y + action[0]
            if not (0 <= pos_x <= len(grid[0]) - 1 and 0 <= pos_y <= len(grid) - 1):
                continue
            if grid[pos_y][pos_x] != 0:
                continue
            successors.append(
                Node(
                    pos_x,
                    pos_y,
                    self.target.pos_x,
                    self.target.pos_y,
                    parent.g_cost + 1,
                    parent,
                )
            )
        return successors

    def retrace_path(self, node):
        current_node = node
        path = []
        while current_node is not None:
            path.append((current_node.pos_y, current_node.pos_x))
            current_node = current_node.parent
        path.reverse()
        return path


if __name__ == "__main__":
    init = (0, 0)
    goal = (len(grid) - 1, len(grid[0]) - 1)
    for elem in grid:
        print(elem)
    print("------")
    greedy_bf = GreedyBestFirst(init, goal)
    path = greedy_bf.search()
    if path:
        for pos_x, pos_y in path:
            grid[pos_x][pos_y] = 2
        for elem in grid:
            print(elem)
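# Minimal heuristic check (illustrative): a node at (0, 0) aiming for (6, 6)
# has Manhattan cost |0 - 6| + |0 - 6| = 12.
assert Node(0, 0, 6, 6, 0, None).f_cost == 12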
"""simple docstring"""
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
if TYPE_CHECKING:
from ... import FeatureExtractionMixin, TensorType
logger = logging.get_logger(__name__)

IMAGEGPT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "openai/imagegpt-small": "",
    "openai/imagegpt-medium": "",
    "openai/imagegpt-large": "",
}


class ImageGPTConfig(PretrainedConfig):
    model_type = "imagegpt"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "hidden_size": "n_embd",
        "max_position_embeddings": "n_positions",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }

    def __init__(
        self,
        vocab_size=512 + 1,  # 512 color clusters plus one start-of-sequence token
        n_positions=32 * 32,
        n_embd=512,
        n_layer=24,
        n_head=8,
        n_inner=None,
        activation_function="quick_gelu",
        resid_pdrop=0.1,
        embd_pdrop=0.1,
        attn_pdrop=0.1,
        layer_norm_epsilon=1e-5,
        initializer_range=0.02,
        scale_attn_weights=True,
        use_cache=True,
        tie_word_embeddings=False,
        scale_attn_by_inverse_layer_idx=False,
        reorder_and_upcast_attn=False,
        **kwargs,
    ) -> Any:
        self.vocab_size = vocab_size
        self.n_positions = n_positions
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        self.n_inner = n_inner
        self.activation_function = activation_function
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.scale_attn_weights = scale_attn_weights
        self.use_cache = use_cache
        self.scale_attn_by_inverse_layer_idx = scale_attn_by_inverse_layer_idx
        self.reorder_and_upcast_attn = reorder_and_upcast_attn
        self.tie_word_embeddings = tie_word_embeddings
        super().__init__(tie_word_embeddings=tie_word_embeddings, **kwargs)


class ImageGPTOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("input_ids", {0: "batch", 1: "sequence"}),
            ]
        )

    def generate_dummy_inputs(
        self,
        preprocessor: "FeatureExtractionMixin",
        batch_size: int = 1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional["TensorType"] = None,
        num_channels: int = 3,
        image_width: int = 32,
        image_height: int = 32,
    ) -> Mapping[str, Any]:
        input_image = self._generate_dummy_images(batch_size, num_channels, image_height, image_width)
        inputs = dict(preprocessor(images=input_image, return_tensors=framework))
        return inputs
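# Minimal sketch (illustrative): the defaults above describe the "small"
# ImageGPT variant - a 513-symbol vocabulary over 32 * 32 = 1024 pixel positions.
config = ImageGPTConfig()
assert config.vocab_size == 513 and config.n_positions == 1024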
'''simple docstring'''
from typing import List, Optional, Union
import numpy as np
from ....audio_utils import mel_filter_bank, optimal_fft_length, spectrogram, window_function
from ....feature_extraction_sequence_utils import SequenceFeatureExtractor
from ....feature_extraction_utils import BatchFeature
from ....file_utils import PaddingStrategy, TensorType
from ....utils import logging
logger = logging.get_logger(__name__)


class MCTCTFeatureExtractor(SequenceFeatureExtractor):
    model_input_names = ["input_features", "attention_mask"]

    def __init__(
        self,
        feature_size=80,
        sampling_rate=16_000,
        padding_value=0.0,
        hop_length=10,
        win_length=25,
        win_function="hamming_window",
        frame_signal_scale=32_768.0,
        preemphasis_coeff=0.97,
        mel_floor=1.0,
        normalize_means=True,
        normalize_vars=True,
        return_attention_mask=False,
        **kwargs,
    ) -> Any:
        '''simple docstring'''
        super().__init__(feature_size=feature_size, sampling_rate=sampling_rate, padding_value=padding_value, **kwargs)
        self.feature_size = feature_size
        self.sampling_rate = sampling_rate
        self.padding_value = padding_value
        self.hop_length = hop_length
        self.win_length = win_length
        self.frame_signal_scale = frame_signal_scale
        self.preemphasis_coeff = preemphasis_coeff
        self.mel_floor = mel_floor
        self.normalize_means = normalize_means
        self.normalize_vars = normalize_vars
        self.win_function = win_function
        self.return_attention_mask = return_attention_mask
        self.sample_size = win_length * sampling_rate // 1000
        self.sample_stride = hop_length * sampling_rate // 1000
        self.n_fft = optimal_fft_length(self.sample_size)
        self.n_freqs = (self.n_fft // 2) + 1

    def _extract_mfsc_features(self, one_waveform: np.ndarray) -> np.ndarray:
        '''simple docstring'''
        if self.win_function == "hamming_window":
            window = window_function(window_length=self.sample_size, name=self.win_function, periodic=False)
        else:
            window = window_function(window_length=self.sample_size, name=self.win_function)
        fbanks = mel_filter_bank(
            num_frequency_bins=self.n_freqs,
            num_mel_filters=self.feature_size,
            min_frequency=0.0,
            max_frequency=self.sampling_rate / 2.0,
            sampling_rate=self.sampling_rate,
        )
        msfc_features = spectrogram(
            one_waveform * self.frame_signal_scale,
            window=window,
            frame_length=self.sample_size,
            hop_length=self.sample_stride,
            fft_length=self.n_fft,
            center=False,
            preemphasis=self.preemphasis_coeff,
            mel_filters=fbanks,
            mel_floor=self.mel_floor,
            log_mel="log",
        )
        return msfc_features.T

    def _normalize_one(self, x, input_length, padding_value):
        '''simple docstring'''
        if self.normalize_means:
            mean = x[:input_length].mean(axis=0)
            x = np.subtract(x, mean)
        if self.normalize_vars:
            std = x[:input_length].std(axis=0)
            x = np.divide(x, std)
        if input_length < x.shape[0]:
            x[input_length:] = padding_value
        # make sure array is in float32
        x = x.astype(np.float32)
        return x

    def normalize(self, input_features, attention_mask=None) -> List[np.ndarray]:
        '''simple docstring'''
        lengths = attention_mask.sum(-1) if attention_mask is not None else [x.shape[0] for x in input_features]
        return [self._normalize_one(x, n, self.padding_value) for x, n in zip(input_features, lengths)]

    def __call__(
        self,
        raw_speech,
        padding=False,
        max_length=None,
        truncation=False,
        pad_to_multiple_of=None,
        return_attention_mask=None,
        return_tensors=None,
        sampling_rate=None,
        **kwargs,
    ) -> BatchFeature:
        '''simple docstring'''
        if sampling_rate is not None:
            if sampling_rate != self.sampling_rate:
                raise ValueError(
                    F"""The model corresponding to this feature extractor: {self} was trained using a sampling rate of"""
                    F""" {self.sampling_rate}. Please make sure that the provided `raw_speech` input was sampled with"""
                    F""" {self.sampling_rate} and not {sampling_rate}.""")
        else:
            logger.warning(
                "It is strongly recommended to pass the ``sampling_rate`` argument to this function. "
                "Failing to do so can result in silent errors that might be hard to debug.")
        is_batched_numpy = isinstance(raw_speech, np.ndarray) and len(raw_speech.shape) > 1
        if is_batched_numpy and len(raw_speech.shape) > 2:
            raise ValueError(F"""Only mono-channel audio is supported for input to {self}""")
        is_batched = is_batched_numpy or (
            isinstance(raw_speech, (list, tuple)) and (isinstance(raw_speech[0], (np.ndarray, tuple, list)))
        )
        if is_batched:
            raw_speech = [np.asarray(speech, dtype=np.float32) for speech in raw_speech]
        elif not is_batched and not isinstance(raw_speech, np.ndarray):
            raw_speech = np.asarray(raw_speech, dtype=np.float32)
        elif isinstance(raw_speech, np.ndarray) and raw_speech.dtype is np.dtype(np.float64):
            raw_speech = raw_speech.astype(np.float32)
        # always return batch
        if not is_batched:
            raw_speech = [raw_speech]
        # extract fbank features
        features = [self._extract_mfsc_features(one_waveform) for one_waveform in raw_speech]
        # convert into correct format for padding
        encoded_inputs = BatchFeature({"input_features": features})
        padded_inputs = self.pad(
            encoded_inputs,
            padding=padding,
            max_length=max_length,
            truncation=truncation,
            pad_to_multiple_of=pad_to_multiple_of,
            return_attention_mask=return_attention_mask,
            **kwargs,
        )
        # make sure list is in array format
        input_features = padded_inputs.get("input_features")
        if isinstance(input_features[0], list):
            padded_inputs["input_features"] = [np.asarray(feature, dtype=np.float32) for feature in input_features]
        attention_mask = padded_inputs.get("attention_mask")
        if attention_mask is not None:
            padded_inputs["attention_mask"] = [np.asarray(array, dtype=np.int32) for array in attention_mask]
        if self.normalize_means or self.normalize_vars:
            attention_mask = (
                np.array(attention_mask, dtype=np.int32)
                if self._get_padding_strategies(padding, max_length=max_length) is not PaddingStrategy.DO_NOT_PAD
                and padding
                else None
            )
            padded_inputs["input_features"] = self.normalize(
                padded_inputs["input_features"], attention_mask=attention_mask)
        if return_tensors is not None:
            padded_inputs = padded_inputs.convert_to_tensors(return_tensors)
        return padded_inputs
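# Minimal usage sketch (illustrative): featurize one second of random noise.
# With the 25 ms window / 10 ms hop defaults, 16 000 samples should yield
# 98 frames of 80 mel-spaced features.
import numpy as np

extractor = MCTCTFeatureExtractor()
batch = extractor(np.random.randn(16_000).astype(np.float32), sampling_rate=16_000, return_tensors="np")
print(batch["input_features"].shape)  # e.g. (1, 98, 80)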
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
logger = logging.get_logger(__name__)

RESNET_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    'microsoft/resnet-50': 'https://huggingface.co/microsoft/resnet-50/blob/main/config.json',
}


class ResNetConfig(BackboneConfigMixin, PretrainedConfig):
    model_type = "resnet"
    layer_types = ["basic", "bottleneck"]

    def __init__(
        self,
        num_channels=3,
        embedding_size=64,
        hidden_sizes=[256, 512, 1024, 2048],
        depths=[3, 4, 6, 3],
        layer_type="bottleneck",
        hidden_act="relu",
        downsample_in_first_stage=False,
        out_features=None,
        out_indices=None,
        **kwargs,
    ) -> None:
        '''simple docstring'''
        super().__init__(**kwargs)
        if layer_type not in self.layer_types:
            raise ValueError(F"""layer_type={layer_type} is not one of {','.join(self.layer_types)}""")
        self.num_channels = num_channels
        self.embedding_size = embedding_size
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.layer_type = layer_type
        self.hidden_act = hidden_act
        self.downsample_in_first_stage = downsample_in_first_stage
        self.stage_names = ["stem"] + [F"""stage{idx}""" for idx in range(1, len(depths) + 1)]
        self._out_features, self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features, out_indices=out_indices, stage_names=self.stage_names)


class ResNetOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        '''simple docstring'''
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ])

    @property
    def atol_for_validation(self) -> float:
        '''simple docstring'''
        return 1e-3
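# Minimal sketch (illustrative): a ResNet-50-like config and the ONNX export
# metadata defined above.
config = ResNetConfig(depths=[3, 4, 6, 3], layer_type="bottleneck")
onnx_config = ResNetOnnxConfig(config)
assert "pixel_values" in onnx_config.inputs
assert onnx_config.atol_for_validation == 1e-3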
"""simple docstring"""
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import cached_download, hf_hub_url
from PIL import Image
from transformers import DPTConfig, DPTForDepthEstimation, DPTForSemanticSegmentation, DPTImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)


def get_dpt_config(checkpoint_url):
    config = DPTConfig()
    if "large" in checkpoint_url:
        config.hidden_size = 1024
        config.intermediate_size = 4096
        config.num_hidden_layers = 24
        config.num_attention_heads = 16
        config.backbone_out_indices = [5, 11, 17, 23]
        config.neck_hidden_sizes = [256, 512, 1024, 1024]
        expected_shape = (1, 384, 384)
    if "ade" in checkpoint_url:
        config.use_batch_norm_in_fusion_residual = True
        config.num_labels = 150
        repo_id = 'huggingface/label-files'
        filename = 'ade20k-id2label.json'
        id2label = json.load(open(cached_download(hf_hub_url(repo_id, filename, repo_type='dataset')), 'r'))
        id2label = {int(k): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}
        expected_shape = [1, 150, 480, 480]
    return config, expected_shape
def remove_ignore_keys_(state_dict):
    ignore_keys = ['pretrained.model.head.weight', 'pretrained.model.head.bias']
    for k in ignore_keys:
        state_dict.pop(k, None)
def rename_key(name):
    if (
        "pretrained.model" in name
        and "cls_token" not in name
        and "pos_embed" not in name
        and "patch_embed" not in name
    ):
        name = name.replace('pretrained.model', 'dpt.encoder')
    if "pretrained.model" in name:
        name = name.replace('pretrained.model', 'dpt.embeddings')
    if "patch_embed" in name:
        name = name.replace('patch_embed', 'patch_embeddings')
    if "pos_embed" in name:
        name = name.replace('pos_embed', 'position_embeddings')
    if "attn.proj" in name:
        name = name.replace('attn.proj', 'attention.output.dense')
    if "proj" in name and "project" not in name:
        name = name.replace('proj', 'projection')
    if "blocks" in name:
        name = name.replace('blocks', 'layer')
    if "mlp.fc1" in name:
        name = name.replace('mlp.fc1', 'intermediate.dense')
    if "mlp.fc2" in name:
        name = name.replace('mlp.fc2', 'output.dense')
    if "norm1" in name:
        name = name.replace('norm1', 'layernorm_before')
    if "norm2" in name:
        name = name.replace('norm2', 'layernorm_after')
    if "scratch.output_conv" in name:
        name = name.replace('scratch.output_conv', 'head')
    if "scratch" in name:
        name = name.replace('scratch', 'neck')
    if "layer1_rn" in name:
        name = name.replace('layer1_rn', 'convs.0')
    if "layer2_rn" in name:
        name = name.replace('layer2_rn', 'convs.1')
    if "layer3_rn" in name:
        name = name.replace('layer3_rn', 'convs.2')
    if "layer4_rn" in name:
        name = name.replace('layer4_rn', 'convs.3')
    if "refinenet" in name:
        layer_idx = int(name[len('neck.refinenet') : len('neck.refinenet') + 1])
        # tricky here: we need to map 4 to 0, 3 to 1, 2 to 2 and 1 to 3
        name = name.replace(f'refinenet{layer_idx}', f'fusion_stage.layers.{abs(layer_idx-4)}')
    if "out_conv" in name:
        name = name.replace('out_conv', 'projection')
    if "resConfUnit1" in name:
        name = name.replace('resConfUnit1', 'residual_layer1')
    if "resConfUnit2" in name:
        name = name.replace('resConfUnit2', 'residual_layer2')
    if "conv1" in name:
        name = name.replace('conv1', 'convolution1')
    if "conv2" in name:
        name = name.replace('conv2', 'convolution2')
    # readout blocks
    if "pretrained.act_postprocess1.0.project.0" in name:
        name = name.replace('pretrained.act_postprocess1.0.project.0', 'neck.reassemble_stage.readout_projects.0.0')
    if "pretrained.act_postprocess2.0.project.0" in name:
        name = name.replace('pretrained.act_postprocess2.0.project.0', 'neck.reassemble_stage.readout_projects.1.0')
    if "pretrained.act_postprocess3.0.project.0" in name:
        name = name.replace('pretrained.act_postprocess3.0.project.0', 'neck.reassemble_stage.readout_projects.2.0')
    if "pretrained.act_postprocess4.0.project.0" in name:
        name = name.replace('pretrained.act_postprocess4.0.project.0', 'neck.reassemble_stage.readout_projects.3.0')
    # resize blocks
    if "pretrained.act_postprocess1.3" in name:
        name = name.replace('pretrained.act_postprocess1.3', 'neck.reassemble_stage.layers.0.projection')
    if "pretrained.act_postprocess1.4" in name:
        name = name.replace('pretrained.act_postprocess1.4', 'neck.reassemble_stage.layers.0.resize')
    if "pretrained.act_postprocess2.3" in name:
        name = name.replace('pretrained.act_postprocess2.3', 'neck.reassemble_stage.layers.1.projection')
    if "pretrained.act_postprocess2.4" in name:
        name = name.replace('pretrained.act_postprocess2.4', 'neck.reassemble_stage.layers.1.resize')
    if "pretrained.act_postprocess3.3" in name:
        name = name.replace('pretrained.act_postprocess3.3', 'neck.reassemble_stage.layers.2.projection')
    if "pretrained.act_postprocess4.3" in name:
        name = name.replace('pretrained.act_postprocess4.3', 'neck.reassemble_stage.layers.3.projection')
    if "pretrained.act_postprocess4.4" in name:
        name = name.replace('pretrained.act_postprocess4.4', 'neck.reassemble_stage.layers.3.resize')
    if "pretrained" in name:
        name = name.replace('pretrained', 'dpt')
    if "bn" in name:
        name = name.replace('bn', 'batch_norm')
    if "head" in name:
        name = name.replace('head', 'head.head')
    if "encoder.norm" in name:
        name = name.replace('encoder.norm', 'layernorm')
    if "auxlayer" in name:
        name = name.replace('auxlayer', 'auxiliary_head.head')
    return name
def read_in_q_k_v(state_dict, config):
    for i in range(config.num_hidden_layers):
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f'dpt.encoder.layer.{i}.attn.qkv.weight')
        in_proj_bias = state_dict.pop(f'dpt.encoder.layer.{i}.attn.qkv.bias')
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f'dpt.encoder.layer.{i}.attention.attention.query.weight'] = in_proj_weight[: config.hidden_size, :]
        state_dict[f'dpt.encoder.layer.{i}.attention.attention.query.bias'] = in_proj_bias[: config.hidden_size]
        state_dict[f'dpt.encoder.layer.{i}.attention.attention.key.weight'] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[f'dpt.encoder.layer.{i}.attention.attention.key.bias'] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[f'dpt.encoder.layer.{i}.attention.attention.value.weight'] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[f'dpt.encoder.layer.{i}.attention.attention.value.bias'] = in_proj_bias[-config.hidden_size :]
def prepare_img():
    url = 'http://images.cocodataset.org/val2017/000000039769.jpg'
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_dpt_checkpoint(checkpoint_url, pytorch_dump_folder_path, push_to_hub, model_name):
    config, expected_shape = get_dpt_config(checkpoint_url)
    # load original state_dict from URL
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location='cpu')
    # remove certain keys
    remove_ignore_keys_(state_dict)
    # rename keys
    for key in state_dict.copy().keys():
        val = state_dict.pop(key)
        state_dict[rename_key(key)] = val
    # read in qkv matrices
    read_in_q_k_v(state_dict, config)
    # load HuggingFace model
    model = DPTForSemanticSegmentation(config) if 'ade' in checkpoint_url else DPTForDepthEstimation(config)
    model.load_state_dict(state_dict)
    model.eval()
    # Check outputs on an image
    size = 480 if 'ade' in checkpoint_url else 384
    image_processor = DPTImageProcessor(size=size)
    image = prepare_img()
    encoding = image_processor(image, return_tensors='pt')
    # forward pass
    outputs = model(**encoding).logits if 'ade' in checkpoint_url else model(**encoding).predicted_depth
    # Assert logits
    expected_slice = torch.tensor([[6.3199, 6.3629, 6.4148], [6.3850, 6.3615, 6.4166], [6.3519, 6.3176, 6.3575]])
    if "ade" in checkpoint_url:
        expected_slice = torch.tensor([[4.0480, 4.2420, 4.4360], [4.3124, 4.5693, 4.8261], [4.5768, 4.8965, 5.2163]])
    assert outputs.shape == torch.Size(expected_shape)
    assert (
        torch.allclose(outputs[0, 0, :3, :3], expected_slice, atol=1e-4)
        if "ade" in checkpoint_url
        else torch.allclose(outputs[0, :3, :3], expected_slice)
    )
    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f'Saving model to {pytorch_dump_folder_path}')
    model.save_pretrained(pytorch_dump_folder_path)
    print(f'Saving image processor to {pytorch_dump_folder_path}')
    image_processor.save_pretrained(pytorch_dump_folder_path)
    if push_to_hub:
        print('Pushing model to hub...')
        model.push_to_hub(
            repo_path_or_name=Path(pytorch_dump_folder_path, model_name),
            organization='nielsr',
            commit_message='Add model',
            use_temp_dir=True,
        )
        image_processor.push_to_hub(
            repo_path_or_name=Path(pytorch_dump_folder_path, model_name),
            organization='nielsr',
            commit_message='Add image processor',
            use_temp_dir=True,
        )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--checkpoint_url''',
default='''https://github.com/intel-isl/DPT/releases/download/1_0/dpt_large-midas-2f21e586.pt''',
type=str,
help='''URL of the original DPT checkpoint you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''',
default=None,
type=str,
required=True,
help='''Path to the output PyTorch model directory.''',
)
parser.add_argument(
'''--push_to_hub''',
action='''store_true''',
)
parser.add_argument(
'''--model_name''',
default='''dpt-large''',
type=str,
help='''Name of the model, in case you\'re pushing to the hub.''',
)
    args = parser.parse_args()
convert_dpt_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub, args.model_name)
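# Example invocation (a sketch; the script name and dump path are illustrative):
#   python convert_dpt_to_pytorch.py \
#       --checkpoint_url https://github.com/intel-isl/DPT/releases/download/1_0/dpt_large-midas-2f21e586.pt \
#       --pytorch_dump_folder_path ./dpt-large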
"""simple docstring"""
def miller_rabin(n: int, allow_probable: bool = False) -> bool:
    if n == 2:
        return True
    if not n % 2 or n < 2:
        return False
    if n > 5 and n % 10 not in (1, 3, 7, 9):  # can quickly check last digit
        return False
    if n > 3317044064679887385961981 and not allow_probable:
        raise ValueError(
            'Warning: upper bound of deterministic test is exceeded. '
            'Pass allow_probable=True to allow probabilistic test. '
            'A return value of True indicates a probable prime.')
    # array bounds provided by analysis
    bounds = [
        2047,
        1373653,
        25326001,
        3215031751,
        2152302898747,
        3474749660383,
        341550071728321,
        1,
        3825123056546413051,
        1,
        1,
        318665857834031151167461,
        3317044064679887385961981,
    ]
    primes = [2, 3, 5, 7, 11, 13, 17, 19, 23, 29, 31, 37, 41]
    for idx, _p in enumerate(bounds, 1):
        if n < _p:
            # then we have our last prime to check
            plist = primes[:idx]
            break
    d, s = n - 1, 0
    # break up n -1 into a power of 2 (s) and
    # remaining odd component
    # essentially, solve for d * 2 ** s == n - 1
    while d % 2 == 0:
        d //= 2
        s += 1
    for prime in plist:
        pr = False
        for r in range(s):
            m = pow(prime, d * 2**r, n)
            # see article for analysis explanation for m
            if (r == 0 and m == 1) or ((m + 1) % n == 0):
                pr = True
                # this loop will not determine compositeness
                break
        if pr:
            continue
        # if pr is False, then the above loop never evaluated to true,
        # and the n MUST be composite
        return False
    return True


def test_miller_rabin() -> None:
assert not miller_rabin(561 )
assert miller_rabin(563 )
# 2047
assert not miller_rabin(838201 )
assert miller_rabin(838207 )
# 1_373_653
assert not miller_rabin(17316001 )
assert miller_rabin(17316017 )
# 25_326_001
assert not miller_rabin(3078386641 )
assert miller_rabin(3078386653 )
# 3_215_031_751
assert not miller_rabin(1713045574801 )
assert miller_rabin(1713045574819 )
# 2_152_302_898_747
assert not miller_rabin(2779799728307 )
assert miller_rabin(2779799728327 )
# 3_474_749_660_383
assert not miller_rabin(113850023909441 )
assert miller_rabin(113850023909527 )
# 341_550_071_728_321
assert not miller_rabin(1275041018848804351 )
assert miller_rabin(1275041018848804391 )
# 3_825_123_056_546_413_051
assert not miller_rabin(79666464458507787791867 )
assert miller_rabin(79666464458507787791951 )
# 318_665_857_834_031_151_167_461
assert not miller_rabin(552840677446647897660333 )
assert miller_rabin(552840677446647897660359 )
# 3_317_044_064_679_887_385_961_981
# upper limit for probabilistic test
if __name__ == "__main__":
test_miller_rabin()
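# Independent worked check (illustrative): 97 is prime, 91 = 7 * 13 is not.
assert miller_rabin(97) and not miller_rabin(91)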
def sum_of_series(first_term: int, common_diff: int, num_of_terms: int) -> float:
    total = (num_of_terms / 2) * (2 * first_term + (num_of_terms - 1) * common_diff)
    # formula for sum of an arithmetic series
    return total


def main():
    print(sum_of_series(1, 1, 10))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
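# Worked example (illustrative): first_term=1, common_diff=1, num_of_terms=10
# gives (10 / 2) * (2 * 1 + 9 * 1) = 5 * 11 = 55.0.
assert sum_of_series(1, 1, 10) == 55.0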
import fire
from utils import calculate_rouge, save_json
def calculate_rouge_path(pred_path, tgt_path, save_path=None, **kwargs):
    output_lns = [x.strip() for x in open(pred_path).readlines()]
    reference_lns = [x.strip() for x in open(tgt_path).readlines()][: len(output_lns)]
    metrics = calculate_rouge(output_lns, reference_lns, **kwargs)
    if save_path is not None:
        save_json(metrics, save_path, indent=None)
    return metrics  # these print nicely
if __name__ == "__main__":
fire.Fire(calculate_rouge_path)
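# Example invocation via fire (a sketch; script and file names are illustrative):
#   python rouge_cli.py predictions.txt references.txt --save_path rouge.json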
'''simple docstring'''
from .data_collator import (
DataCollatorForLanguageModeling,
DataCollatorForPermutationLanguageModeling,
    DataCollatorForSeq2Seq,
DataCollatorForSOP,
DataCollatorForTokenClassification,
DataCollatorForWholeWordMask,
DataCollatorWithPadding,
DefaultDataCollator,
default_data_collator,
)
from .metrics import glue_compute_metrics, xnli_compute_metrics
from .processors import (
DataProcessor,
InputExample,
InputFeatures,
SingleSentenceClassificationProcessor,
SquadExample,
SquadFeatures,
    SquadV1Processor,
    SquadV2Processor,
glue_convert_examples_to_features,
glue_output_modes,
glue_processors,
glue_tasks_num_labels,
squad_convert_examples_to_features,
xnli_output_modes,
xnli_processors,
xnli_tasks_num_labels,
)
'''simple docstring'''
# coding=utf-8
# Copyright 2023 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# this script dumps information about the environment
import os
import platform
import sys
os.environ["TF_CPP_MIN_LOG_LEVEL"] = '''3'''  # silence TensorFlow C++ logging
print('''Python version:''', sys.version)
print('''OS platform:''', platform.platform())
print('''OS architecture:''', platform.machine())
try:
import torch
print('''Torch version:''', torch.__version__)
print('''Cuda available:''', torch.cuda.is_available())
print('''Cuda version:''', torch.version.cuda)
print('''CuDNN version:''', torch.backends.cudnn.version())
print('''Number of GPUs available:''', torch.cuda.device_count())
except ImportError:
print('''Torch version:''', None)
try:
import transformers
print('''transformers version:''', transformers.__version__)
except ImportError:
print('''transformers version:''', None)
'''simple docstring'''
def excel_title_to_column(column_title: str) -> int:
    """simple docstring"""
    assert column_title.isupper()
    answer = 0
    index = len(column_title) - 1
    power = 0
    while index >= 0:
        value = (ord(column_title[index]) - 64) * pow(26, power)
        answer += value
        power += 1
        index -= 1
    return answer
if __name__ == "__main__":
from doctest import testmod
testmod()
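# Worked examples (illustrative): "AB" -> 1 * 26 + 2 = 28 and "ZZ" -> 26 * 26 + 26 = 702.
assert excel_title_to_column("AB") == 28
assert excel_title_to_column("ZZ") == 702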
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

GPT_NEOX_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "EleutherAI/gpt-neox-20b": "https://huggingface.co/EleutherAI/gpt-neox-20b/resolve/main/config.json",
    # See all GPTNeoX models at https://huggingface.co/models?filter=gpt_neox
}


class GPTNeoXConfig(PretrainedConfig):
    model_type = '''gpt_neox'''

    def __init__(
        self,
        vocab_size=50432,
        hidden_size=6144,
        num_hidden_layers=44,
        num_attention_heads=64,
        intermediate_size=24576,
        hidden_act="gelu",
        rotary_pct=0.25,
        rotary_emb_base=10000,
        attention_dropout=0.0,
        hidden_dropout=0.0,
        classifier_dropout=0.1,
        max_position_embeddings=2048,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        use_cache=True,
        bos_token_id=0,
        eos_token_id=2,
        tie_word_embeddings=False,
        use_parallel_residual=True,
        rope_scaling=None,
        **kwargs,
    ):
        """simple docstring"""
        super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.rotary_pct = rotary_pct
        self.rotary_emb_base = rotary_emb_base
        self.attention_dropout = attention_dropout
        self.hidden_dropout = hidden_dropout
        self.classifier_dropout = classifier_dropout
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.use_cache = use_cache
        self.tie_word_embeddings = tie_word_embeddings
        self.use_parallel_residual = use_parallel_residual
        self.rope_scaling = rope_scaling
        self._rope_scaling_validation()
        if self.hidden_size % self.num_attention_heads != 0:
            raise ValueError(
                '''The hidden size is not divisible by the number of attention heads! Make sure to update them!''')

    def _rope_scaling_validation(self):
        """simple docstring"""
        if self.rope_scaling is None:
            return
        if not isinstance(self.rope_scaling, dict) or len(self.rope_scaling) != 2:
            raise ValueError(
                '''`rope_scaling` must be a dictionary with two fields, `type` and `factor`, '''
                F'''got {self.rope_scaling}''')
        rope_scaling_type = self.rope_scaling.get('''type''', None)
        rope_scaling_factor = self.rope_scaling.get('''factor''', None)
        if rope_scaling_type is None or rope_scaling_type not in ["linear", "dynamic"]:
            raise ValueError(
                F'''`rope_scaling`\'s type field must be one of [\'linear\', \'dynamic\'], got {rope_scaling_type}''')
        if rope_scaling_factor is None or not isinstance(rope_scaling_factor, float) or rope_scaling_factor <= 1.0:
            raise ValueError(F'''`rope_scaling`\'s factor field must be a float > 1, got {rope_scaling_factor}''')
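# Minimal sketch (illustrative): request linear RoPE scaling with a 2x factor;
# the validation above accepts only {"type": "linear" | "dynamic", "factor": > 1.0}.
config = GPTNeoXConfig(rope_scaling={"type": "linear", "factor": 2.0})
assert config.rope_scaling["factor"] == 2.0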
import json
import os
from typing import Optional
import numpy as np
from ...feature_extraction_utils import BatchFeature
from ...processing_utils import ProcessorMixin
from ...utils import logging
from ...utils.hub import get_file_from_repo
from ..auto import AutoTokenizer
UpperCAmelCase_ : Dict = logging.get_logger(__name__)
class SCREAMING_SNAKE_CASE__ ( lowercase__ ):
snake_case__ : List[str] = '''AutoTokenizer'''
snake_case__ : Optional[Any] = ['''tokenizer''']
snake_case__ : int = {
'''semantic_prompt''': 1,
'''coarse_prompt''': 2,
'''fine_prompt''': 2,
}
def __init__( self : Any , SCREAMING_SNAKE_CASE__ : Optional[Any] , SCREAMING_SNAKE_CASE__ : Any=None ) -> Optional[Any]:
super().__init__(SCREAMING_SNAKE_CASE__ )
a_ : Tuple = speaker_embeddings
@classmethod
def SCREAMING_SNAKE_CASE ( cls : Any , SCREAMING_SNAKE_CASE__ : Dict , SCREAMING_SNAKE_CASE__ : Tuple="speaker_embeddings_path.json" , **SCREAMING_SNAKE_CASE__ : Tuple ) -> Optional[Any]:
if speaker_embeddings_dict_path is not None:
a_ : int = get_file_from_repo(
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , subfolder=kwargs.pop('subfolder' , SCREAMING_SNAKE_CASE__ ) , cache_dir=kwargs.pop('cache_dir' , SCREAMING_SNAKE_CASE__ ) , force_download=kwargs.pop('force_download' , SCREAMING_SNAKE_CASE__ ) , proxies=kwargs.pop('proxies' , SCREAMING_SNAKE_CASE__ ) , resume_download=kwargs.pop('resume_download' , SCREAMING_SNAKE_CASE__ ) , local_files_only=kwargs.pop('local_files_only' , SCREAMING_SNAKE_CASE__ ) , use_auth_token=kwargs.pop('use_auth_token' , SCREAMING_SNAKE_CASE__ ) , revision=kwargs.pop('revision' , SCREAMING_SNAKE_CASE__ ) , )
if speaker_embeddings_path is None:
logger.warning(
F"""`{os.path.join(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )}` does not exists
, no preloaded speaker embeddings will be used - Make sure to provide a correct path to the json
dictionnary if wanted, otherwise set `speaker_embeddings_dict_path=None`.""" )
a_ : Any = None
else:
with open(SCREAMING_SNAKE_CASE__ ) as speaker_embeddings_json:
a_ : int = json.load(SCREAMING_SNAKE_CASE__ )
else:
a_ : str = None
a_ : List[str] = AutoTokenizer.from_pretrained(SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ )
return cls(tokenizer=SCREAMING_SNAKE_CASE__ , speaker_embeddings=SCREAMING_SNAKE_CASE__ )
def SCREAMING_SNAKE_CASE ( self : Optional[int] , SCREAMING_SNAKE_CASE__ : List[str] , SCREAMING_SNAKE_CASE__ : Optional[int]="speaker_embeddings_path.json" , SCREAMING_SNAKE_CASE__ : Dict="speaker_embeddings" , SCREAMING_SNAKE_CASE__ : bool = False , **SCREAMING_SNAKE_CASE__ : List[str] , ) -> Union[str, Any]:
if self.speaker_embeddings is not None:
os.makedirs(os.path.join(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , 'v2' ) , exist_ok=SCREAMING_SNAKE_CASE__ )
a_ : Union[str, Any] = {}
a_ : List[str] = save_directory
for prompt_key in self.speaker_embeddings:
if prompt_key != "repo_or_path":
a_ : Tuple = self._load_voice_preset(SCREAMING_SNAKE_CASE__ )
a_ : Optional[int] = {}
for key in self.speaker_embeddings[prompt_key]:
np.save(
os.path.join(
embeddings_dict['repo_or_path'] , SCREAMING_SNAKE_CASE__ , F"""{prompt_key}_{key}""" ) , voice_preset[key] , allow_pickle=SCREAMING_SNAKE_CASE__ , )
a_ : List[str] = os.path.join(SCREAMING_SNAKE_CASE__ , F"""{prompt_key}_{key}.npy""" )
a_ : int = tmp_dict
with open(os.path.join(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) , 'w' ) as fp:
json.dump(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
super().save_pretrained(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ )
def SCREAMING_SNAKE_CASE ( self : Optional[int] , SCREAMING_SNAKE_CASE__ : str = None , **SCREAMING_SNAKE_CASE__ : List[Any] ) -> int:
a_ : Union[str, Any] = self.speaker_embeddings[voice_preset]
a_ : str = {}
for key in ["semantic_prompt", "coarse_prompt", "fine_prompt"]:
if key not in voice_preset_paths:
raise ValueError(
F"""Voice preset unrecognized, missing {key} as a key in self.speaker_embeddings[{voice_preset}].""" )
a_ : Dict = get_file_from_repo(
self.speaker_embeddings.get('repo_or_path' , '/' ) , voice_preset_paths[key] , subfolder=kwargs.pop('subfolder' , SCREAMING_SNAKE_CASE__ ) , cache_dir=kwargs.pop('cache_dir' , SCREAMING_SNAKE_CASE__ ) , force_download=kwargs.pop('force_download' , SCREAMING_SNAKE_CASE__ ) , proxies=kwargs.pop('proxies' , SCREAMING_SNAKE_CASE__ ) , resume_download=kwargs.pop('resume_download' , SCREAMING_SNAKE_CASE__ ) , local_files_only=kwargs.pop('local_files_only' , SCREAMING_SNAKE_CASE__ ) , use_auth_token=kwargs.pop('use_auth_token' , SCREAMING_SNAKE_CASE__ ) , revision=kwargs.pop('revision' , SCREAMING_SNAKE_CASE__ ) , )
if path is None:
raise ValueError(
F"""`{os.path.join(self.speaker_embeddings.get("repo_or_path" , "/" ) , voice_preset_paths[key] )}` does not exists
, no preloaded voice preset will be used - Make sure to provide correct paths to the {voice_preset}
embeddings.""" )
a_ : str = np.load(SCREAMING_SNAKE_CASE__ )
return voice_preset_dict
def SCREAMING_SNAKE_CASE ( self : List[Any] , SCREAMING_SNAKE_CASE__ : Optional[dict] = None ) -> int:
for key in ["semantic_prompt", "coarse_prompt", "fine_prompt"]:
if key not in voice_preset:
raise ValueError(F"""Voice preset unrecognized, missing {key} as a key.""" )
if not isinstance(voice_preset[key] , np.ndarray ):
raise ValueError(F"""{key} voice preset must be a {str(self.preset_shape[key] )}D ndarray.""" )
if len(voice_preset[key].shape ) != self.preset_shape[key]:
raise ValueError(F"""{key} voice preset must be a {str(self.preset_shape[key] )}D ndarray.""" )
def __call__( self : Optional[int] , SCREAMING_SNAKE_CASE__ : Dict=None , SCREAMING_SNAKE_CASE__ : Optional[Any]=None , SCREAMING_SNAKE_CASE__ : Optional[int]="pt" , SCREAMING_SNAKE_CASE__ : Optional[Any]=2_5_6 , SCREAMING_SNAKE_CASE__ : Optional[Any]=False , SCREAMING_SNAKE_CASE__ : Tuple=True , SCREAMING_SNAKE_CASE__ : Any=False , **SCREAMING_SNAKE_CASE__ : List[str] , ) -> List[str]:
if voice_preset is not None and not isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
if (
isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
and self.speaker_embeddings is not None
and voice_preset in self.speaker_embeddings
):
a_ : Optional[int] = self._load_voice_preset(SCREAMING_SNAKE_CASE__ )
else:
if isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) and not voice_preset.endswith('.npz' ):
a_ : Optional[Any] = voice_preset + '.npz'
a_ : Any = np.load(SCREAMING_SNAKE_CASE__ )
if voice_preset is not None:
self._validate_voice_preset_dict(SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ )
a_ : Dict = BatchFeature(data=SCREAMING_SNAKE_CASE__ , tensor_type=SCREAMING_SNAKE_CASE__ )
a_ : List[Any] = self.tokenizer(
SCREAMING_SNAKE_CASE__ , return_tensors=SCREAMING_SNAKE_CASE__ , padding='max_length' , max_length=SCREAMING_SNAKE_CASE__ , return_attention_mask=SCREAMING_SNAKE_CASE__ , return_token_type_ids=SCREAMING_SNAKE_CASE__ , add_special_tokens=SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ , )
if voice_preset is not None:
a_ : Optional[int] = voice_preset
return encoded_text
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DPMSolverMultistepScheduler,
TextToVideoSDPipeline,
UNetaDConditionModel,
)
from diffusers.utils import is_xformers_available, load_numpy, skip_mps, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
@skip_mps
class SCREAMING_SNAKE_CASE__ ( lowercase__ , unittest.TestCase ):
snake_case__ : Optional[Any] = TextToVideoSDPipeline
snake_case__ : Optional[int] = TEXT_TO_IMAGE_PARAMS
snake_case__ : str = TEXT_TO_IMAGE_BATCH_PARAMS
# No `output_type`.
snake_case__ : Optional[Any] = frozenset(
[
'''num_inference_steps''',
'''generator''',
'''latents''',
'''return_dict''',
'''callback''',
'''callback_steps''',
] )
def SCREAMING_SNAKE_CASE ( self : str ) -> Union[str, Any]:
torch.manual_seed(0 )
a_ : Optional[int] = UNetaDConditionModel(
block_out_channels=(3_2, 6_4, 6_4, 6_4) , layers_per_block=2 , sample_size=3_2 , in_channels=4 , out_channels=4 , down_block_types=('CrossAttnDownBlock3D', 'CrossAttnDownBlock3D', 'CrossAttnDownBlock3D', 'DownBlock3D') , up_block_types=('UpBlock3D', 'CrossAttnUpBlock3D', 'CrossAttnUpBlock3D', 'CrossAttnUpBlock3D') , cross_attention_dim=3_2 , attention_head_dim=4 , )
a_ : int = DDIMScheduler(
beta_start=0.00085 , beta_end=0.012 , beta_schedule='scaled_linear' , clip_sample=SCREAMING_SNAKE_CASE__ , set_alpha_to_one=SCREAMING_SNAKE_CASE__ , )
torch.manual_seed(0 )
a_ : int = AutoencoderKL(
block_out_channels=[3_2, 6_4] , in_channels=3 , out_channels=3 , down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'] , up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'] , latent_channels=4 , sample_size=1_2_8 , )
torch.manual_seed(0 )
a_ : Optional[int] = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=3_2 , intermediate_size=3_7 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_0_0_0 , hidden_act='gelu' , projection_dim=5_1_2 , )
a_ : Dict = CLIPTextModel(SCREAMING_SNAKE_CASE__ )
a_ : Union[str, Any] = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' )
a_ : Union[str, Any] = {
'unet': unet,
'scheduler': scheduler,
'vae': vae,
'text_encoder': text_encoder,
'tokenizer': tokenizer,
}
return components
def SCREAMING_SNAKE_CASE ( self : List[Any] , SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : List[Any]=0 ) -> List[str]:
if str(SCREAMING_SNAKE_CASE__ ).startswith('mps' ):
a_ : Dict = torch.manual_seed(SCREAMING_SNAKE_CASE__ )
else:
a_ : Optional[Any] = torch.Generator(device=SCREAMING_SNAKE_CASE__ ).manual_seed(SCREAMING_SNAKE_CASE__ )
a_ : int = {
'prompt': 'A painting of a squirrel eating a burger',
'generator': generator,
'num_inference_steps': 2,
'guidance_scale': 6.0,
'output_type': 'pt',
}
return inputs
def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> Tuple:
a_ : List[Any] = 'cpu' # ensure determinism for the device-dependent torch.Generator
a_ : Dict = self.get_dummy_components()
a_ : str = TextToVideoSDPipeline(**SCREAMING_SNAKE_CASE__ )
a_ : Union[str, Any] = sd_pipe.to(SCREAMING_SNAKE_CASE__ )
sd_pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE__ )
a_ : Tuple = self.get_dummy_inputs(SCREAMING_SNAKE_CASE__ )
a_ : Dict = 'np'
a_ : Dict = sd_pipe(**SCREAMING_SNAKE_CASE__ ).frames
a_ : int = frames[0][-3:, -3:, -1]
assert frames[0].shape == (6_4, 6_4, 3)
a_ : Union[str, Any] = np.array([158.0, 160.0, 153.0, 125.0, 100.0, 121.0, 111.0, 93.0, 113.0] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
def SCREAMING_SNAKE_CASE ( self : Any ) -> List[Any]:
self._test_attention_slicing_forward_pass(test_mean_pixel_difference=SCREAMING_SNAKE_CASE__ , expected_max_diff=3E-3 )
@unittest.skipIf(
torch_device != 'cuda' or not is_xformers_available() , reason='XFormers attention is only available with CUDA and `xformers` installed' , )
def SCREAMING_SNAKE_CASE ( self : Any ) -> str:
self._test_xformers_attention_forwardGenerator_pass(test_mean_pixel_difference=SCREAMING_SNAKE_CASE__ , expected_max_diff=1E-2 )
@unittest.skip(reason='Batching needs to be properly figured out first for this pipeline.' )
def SCREAMING_SNAKE_CASE ( self : Dict ) -> Union[str, Any]:
pass
@unittest.skip(reason='Batching needs to be properly figured out first for this pipeline.' )
def SCREAMING_SNAKE_CASE ( self : str ) -> List[str]:
pass
@unittest.skip(reason='`num_images_per_prompt` argument is not supported for this pipeline.' )
def SCREAMING_SNAKE_CASE ( self : int ) -> List[Any]:
pass
def SCREAMING_SNAKE_CASE ( self : Dict ) -> Union[str, Any]:
return super().test_progress_bar()
@slow
@skip_mps
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
def SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> List[Any]:
a_ : str = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/text_to_video/video.npy' )
a_ : Any = TextToVideoSDPipeline.from_pretrained('damo-vilab/text-to-video-ms-1.7b' )
a_ : Optional[Any] = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config )
a_ : Optional[Any] = pipe.to('cuda' )
a_ : Any = 'Spiderman is surfing'
a_ : List[Any] = torch.Generator(device='cpu' ).manual_seed(0 )
a_ : Optional[Any] = pipe(SCREAMING_SNAKE_CASE__ , generator=SCREAMING_SNAKE_CASE__ , num_inference_steps=2_5 , output_type='pt' ).frames
a_ : str = video_frames.cpu().numpy()
assert np.abs(expected_video - video ).mean() < 5E-2
def SCREAMING_SNAKE_CASE ( self : Any ) -> Any:
a_ : Dict = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/text_to_video/video_2step.npy' )
a_ : Tuple = TextToVideoSDPipeline.from_pretrained('damo-vilab/text-to-video-ms-1.7b' )
a_ : Tuple = pipe.to('cuda' )
a_ : Any = 'Spiderman is surfing'
a_ : List[str] = torch.Generator(device='cpu' ).manual_seed(0 )
a_ : List[Any] = pipe(SCREAMING_SNAKE_CASE__ , generator=SCREAMING_SNAKE_CASE__ , num_inference_steps=2 , output_type='pt' ).frames
a_ : List[str] = video_frames.cpu().numpy()
assert np.abs(expected_video - video ).mean() < 5E-2
import sys
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from huggingface_hub import HfFolder, delete_repo
from requests.exceptions import HTTPError
from transformers import AutoImageProcessor, ViTImageProcessor
from transformers.testing_utils import TOKEN, USER, get_tests_dir, is_staging_test
sys.path.append(str(Path(__file__).parent.parent / 'utils'))
from test_module.custom_image_processing import CustomImageProcessor # noqa E402
A : List[str] = get_tests_dir('fixtures')
class __A( unittest.TestCase ):
def SCREAMING_SNAKE_CASE_ ( self ) -> int:
'''simple docstring'''
        response_mock = mock.Mock()
        response_mock.status_code = 500
        response_mock.headers = {}
        response_mock.raise_for_status.side_effect = HTTPError
        response_mock.json.return_value = {}
        # Download this model to make sure it's in the cache.
        _ = ViTImageProcessor.from_pretrained('''hf-internal-testing/tiny-random-vit''')
        # Under the mock environment we get a 500 error when trying to reach the model.
        with mock.patch('''requests.Session.request''', return_value=response_mock) as mock_head:
            _ = ViTImageProcessor.from_pretrained('''hf-internal-testing/tiny-random-vit''')
# This check we did call the fake head request
mock_head.assert_called()
def SCREAMING_SNAKE_CASE_ ( self ) -> str:
'''simple docstring'''
__a = ViTImageProcessor.from_pretrained(
'''https://huggingface.co/hf-internal-testing/tiny-random-vit/resolve/main/preprocessor_config.json''' )
def SCREAMING_SNAKE_CASE_ ( self ) -> List[Any]:
'''simple docstring'''
with self.assertRaises(_snake_case ):
# config is in subfolder, the following should not work without specifying the subfolder
__a = AutoImageProcessor.from_pretrained('''hf-internal-testing/stable-diffusion-all-variants''' )
__a = AutoImageProcessor.from_pretrained(
'''hf-internal-testing/stable-diffusion-all-variants''' , subfolder='''feature_extractor''' )
self.assertIsNotNone(_snake_case )
@is_staging_test
class __A( unittest.TestCase ):
@classmethod
def SCREAMING_SNAKE_CASE_ ( cls ) -> Optional[Any]:
'''simple docstring'''
__a = TOKEN
HfFolder.save_token(_snake_case )
@classmethod
def SCREAMING_SNAKE_CASE_ ( cls ) -> str:
'''simple docstring'''
try:
delete_repo(token=cls._token , repo_id='''test-image-processor''' )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id='''valid_org/test-image-processor-org''' )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id='''test-dynamic-image-processor''' )
except HTTPError:
pass
def SCREAMING_SNAKE_CASE_ ( self ) -> List[Any]:
'''simple docstring'''
__a = ViTImageProcessor.from_pretrained(_snake_case )
image_processor.push_to_hub('''test-image-processor''' , use_auth_token=self._token )
__a = ViTImageProcessor.from_pretrained(F"""{USER}/test-image-processor""" )
for k, v in image_processor.__dict__.items():
self.assertEqual(_snake_case , getattr(_snake_case , _snake_case ) )
# Reset repo
delete_repo(token=self._token , repo_id='''test-image-processor''' )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
image_processor.save_pretrained(
_snake_case , repo_id='''test-image-processor''' , push_to_hub=_snake_case , use_auth_token=self._token )
__a = ViTImageProcessor.from_pretrained(F"""{USER}/test-image-processor""" )
for k, v in image_processor.__dict__.items():
self.assertEqual(_snake_case , getattr(_snake_case , _snake_case ) )
def SCREAMING_SNAKE_CASE_ ( self ) -> int:
'''simple docstring'''
__a = ViTImageProcessor.from_pretrained(_snake_case )
image_processor.push_to_hub('''valid_org/test-image-processor''' , use_auth_token=self._token )
__a = ViTImageProcessor.from_pretrained('''valid_org/test-image-processor''' )
for k, v in image_processor.__dict__.items():
self.assertEqual(_snake_case , getattr(_snake_case , _snake_case ) )
# Reset repo
delete_repo(token=self._token , repo_id='''valid_org/test-image-processor''' )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
image_processor.save_pretrained(
_snake_case , repo_id='''valid_org/test-image-processor-org''' , push_to_hub=_snake_case , use_auth_token=self._token )
__a = ViTImageProcessor.from_pretrained('''valid_org/test-image-processor-org''' )
for k, v in image_processor.__dict__.items():
self.assertEqual(_snake_case , getattr(_snake_case , _snake_case ) )
def SCREAMING_SNAKE_CASE_ ( self ) -> str:
'''simple docstring'''
CustomImageProcessor.register_for_auto_class()
__a = CustomImageProcessor.from_pretrained(_snake_case )
image_processor.push_to_hub('''test-dynamic-image-processor''' , use_auth_token=self._token )
# This has added the proper auto_map field to the config
self.assertDictEqual(
image_processor.auto_map , {'''AutoImageProcessor''': '''custom_image_processing.CustomImageProcessor'''} , )
__a = AutoImageProcessor.from_pretrained(
F"""{USER}/test-dynamic-image-processor""" , trust_remote_code=_snake_case )
# Can't make an isinstance check because the new_image_processor is from the CustomImageProcessor class of a dynamic module
self.assertEqual(new_image_processor.__class__.__name__ , '''CustomImageProcessor''' )
from typing import List
from .keymap import KEYMAP, get_character
def mark(key):
    """Tag a function with a single key code so KeyHandler can register it."""
    def decorator(func):
        handle = getattr(func, '''handle_key''', [])
        handle += [key]
        setattr(func, '''handle_key''', handle)
        return func
    return decorator


def mark_multiple(*keys):
    """Tag a function with several key codes so KeyHandler can register it."""
    def decorator(func):
        handle = getattr(func, '''handle_key''', [])
        handle += keys
        setattr(func, '''handle_key''', handle)
        return func
    return decorator


class KeyHandler(type):
    def __new__(cls, name, bases, attrs) -> Union[str, Any]:
        '''simple docstring'''
        new_cls = super().__new__(cls, name, bases, attrs)
        if not hasattr(new_cls, '''key_handler'''):
            setattr(new_cls, '''key_handler''', {})
        setattr(new_cls, '''handle_input''', KeyHandler.handle_input)
        for value in attrs.values():
            handled_keys = getattr(value, '''handle_key''', [])
            for key in handled_keys:
                new_cls.key_handler[key] = value
        return new_cls

    @staticmethod
    def handle_input(cls):
        '''simple docstring'''
        char = get_character()
        if char != KEYMAP["undefined"]:
            char = ord(char)
        handler = cls.key_handler.get(char)
        if handler:
            cls.current_selection = char
            return handler(cls)
        else:
            return None


def register(cls):
    return KeyHandler(cls.__name__, cls.__bases__, cls.__dict__.copy())
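# Minimal sketch (illustrative): mark() tags a method with the key code it
# handles, and the KeyHandler metaclass collects the mapping at class creation.
class Demo(metaclass=KeyHandler):
    @mark(ord("q"))
    def quit(cls):
        return "bye"

assert Demo.key_handler[ord("q")](Demo) == "bye"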
"""simple docstring"""
import base64


def base85_encode(string: str) -> bytes:
    return base64.a85encode(string.encode('utf-8'))


def base85_decode(a_bytes: bytes) -> str:
    return base64.a85decode(a_bytes).decode('utf-8')
if __name__ == "__main__":
import doctest
doctest.testmod()
| 126
|
"""simple docstring"""
import pytest
from datasets.splits import SplitDict, SplitInfo
from datasets.utils.py_utils import asdict
@pytest.mark.parametrize(
'split_dict' , [
SplitDict(),
        SplitDict({'train': SplitInfo(name='train' , num_bytes=1337 , num_examples=42 , dataset_name='my_dataset' )} ),
        SplitDict({'train': SplitInfo(name='train' , num_bytes=1337 , num_examples=42 )} ),
SplitDict({'train': SplitInfo()} ),
] , )
def test_split_dict_to_yaml_list( split_dict : SplitDict ) -> None:
    split_dict_yaml_list = split_dict._to_yaml_list()
    assert len(split_dict_yaml_list ) == len(split_dict )
    reloaded = SplitDict._from_yaml_list(split_dict_yaml_list )
    for split_name, split_info in split_dict.items():
        # dataset_name field is deprecated, and is therefore not part of the YAML dump
        split_info.dataset_name = None
        # the split name of split_dict takes over the name of the split info object
        split_info.name = split_name
    assert split_dict == reloaded
@pytest.mark.parametrize(
    'split_info' , [SplitInfo(), SplitInfo(dataset_name=None ), SplitInfo(dataset_name='my_dataset' )] )
def test_split_dict_asdict_has_dataset_name( split_info : SplitInfo ) -> None:
    # For backward compatibility, we need asdict(split_dict) to return split info dictionaries with the "dataset_name"
    # field even if it's deprecated. This way old versions of `datasets` can still reload dataset_infos.json files
    split_dict_asdict = asdict(SplitDict({'train': split_info} ) )
    assert "dataset_name" in split_dict_asdict["train"]
    assert split_dict_asdict["train"]["dataset_name"] == split_info.dataset_name
| 126
| 1
|
import argparse
import collections
import json
from pathlib import Path
import requests
import torch
import yaml
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
    MobileViTImageProcessor,
    MobileViTV2Config,
    MobileViTV2ForImageClassification,
    MobileViTV2ForSemanticSegmentation,
)
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def load_orig_config_file( orig_cfg_file ):
    print("Loading config file..." )

    def flatten_yaml_as_dict( d , parent_key="" , sep="." ):
        items = []
        for k, v in d.items():
            new_key = parent_key + sep + k if parent_key else k
            if isinstance(v , collections.abc.MutableMapping ):
                items.extend(flatten_yaml_as_dict(v , new_key , sep=sep ).items() )
            else:
                items.append((new_key, v) )
        return dict(items )

    config = argparse.Namespace()
    with open(orig_cfg_file , "r" ) as yaml_file:
        try:
            cfg = yaml.load(yaml_file , Loader=yaml.FullLoader )
            flat_cfg = flatten_yaml_as_dict(cfg )
            for k, v in flat_cfg.items():
                setattr(config , k , v )
        except yaml.YAMLError as exc:
            logger.error("Error while loading config file: {}. Error message: {}".format(orig_cfg_file , str(exc ) ) )
    return config
def get_mobilevitv2_config( task_name , orig_cfg_file ):
    config = MobileViTV2Config()
    is_segmentation_model = False
    # dataset
    if task_name.startswith("imagenet1k_" ):
        config.num_labels = 1000
        if int(task_name.strip().split("_" )[-1] ) == 384:
            config.image_size = 384
        else:
            config.image_size = 256
        filename = "imagenet-1k-id2label.json"
    elif task_name.startswith("imagenet21k_to_1k_" ):
        config.num_labels = 21000
        if int(task_name.strip().split("_" )[-1] ) == 384:
            config.image_size = 384
        else:
            config.image_size = 256
        filename = "imagenet-22k-id2label.json"
    elif task_name.startswith("ade20k_" ):
        config.num_labels = 151
        config.image_size = 512
        filename = "ade20k-id2label.json"
        is_segmentation_model = True
    elif task_name.startswith("voc_" ):
        config.num_labels = 21
        config.image_size = 512
        filename = "pascal-voc-id2label.json"
        is_segmentation_model = True
    # orig_config
    orig_config = load_orig_config_file(orig_cfg_file )
    assert getattr(orig_config , "model.classification.name" , -1 ) == "mobilevit_v2", "Invalid model"
    config.width_multiplier = getattr(orig_config , "model.classification.mitv2.width_multiplier" , 1.0 )
    assert (
        getattr(orig_config , "model.classification.mitv2.attn_norm_layer" , -1 ) == "layer_norm_2d"
    ), "Norm layers other than layer_norm_2d is not supported"
    config.hidden_act = getattr(orig_config , "model.classification.activation.name" , "swish" )
    # config.image_size == getattr(orig_config, 'sampler.bs.crop_size_width', 256)
    if is_segmentation_model:
        config.output_stride = getattr(orig_config , "model.segmentation.output_stride" , 16 )
        if "_deeplabv3" in task_name:
            config.atrous_rates = getattr(orig_config , "model.segmentation.deeplabv3.aspp_rates" , [12, 24, 36] )
            config.aspp_out_channels = getattr(orig_config , "model.segmentation.deeplabv3.aspp_out_channels" , 512 )
            config.aspp_dropout_prob = getattr(orig_config , "model.segmentation.deeplabv3.aspp_dropout" , 0.1 )
    # id2label
    repo_id = "huggingface/label-files"
    id2label = json.load(open(hf_hub_download(repo_id , filename , repo_type="dataset" ) , "r" ) )
    id2label = {int(k ): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}
    return config
def rename_key( dct , old , new ):
    val = dct.pop(old )
    dct[new] = val
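# Quick illustration of rename_key (added sketch; the keys are toy values, not
# real checkpoint entries):
def _rename_key_demo() -> None:
    toy = {"old.weight": 1}
    rename_key(toy, "old.weight", "new.weight")
    assert toy == {"new.weight": 1}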
def create_rename_keys( state_dict , base_model=False ):
    if base_model:
        model_prefix = ""
    else:
        model_prefix = "mobilevitv2."
    rename_keys = []
    for k in state_dict.keys():
        if k[:8] == "encoder.":
            k_new = k[8:]
        else:
            k_new = k
        if ".block." in k:
            k_new = k_new.replace(".block." , "." )
        if ".conv." in k:
            k_new = k_new.replace(".conv." , ".convolution." )
        if ".norm." in k:
            k_new = k_new.replace(".norm." , ".normalization." )
        if "conv_1." in k:
            k_new = k_new.replace("conv_1." , f'''{model_prefix}conv_stem.''' )
        for i in [1, 2]:
            if f'''layer_{i}.''' in k:
                k_new = k_new.replace(f'''layer_{i}.''' , f'''{model_prefix}encoder.layer.{i-1}.layer.''' )
        if ".exp_1x1." in k:
            k_new = k_new.replace(".exp_1x1." , ".expand_1x1." )
        if ".red_1x1." in k:
            k_new = k_new.replace(".red_1x1." , ".reduce_1x1." )
        for i in [3, 4, 5]:
            if f'''layer_{i}.0.''' in k:
                k_new = k_new.replace(f'''layer_{i}.0.''' , f'''{model_prefix}encoder.layer.{i-1}.downsampling_layer.''' )
            if f'''layer_{i}.1.local_rep.0.''' in k:
                k_new = k_new.replace(f'''layer_{i}.1.local_rep.0.''' , f'''{model_prefix}encoder.layer.{i-1}.conv_kxk.''' )
            if f'''layer_{i}.1.local_rep.1.''' in k:
                k_new = k_new.replace(f'''layer_{i}.1.local_rep.1.''' , f'''{model_prefix}encoder.layer.{i-1}.conv_1x1.''' )
        for i in [3, 4, 5]:
            if i == 3:
                j_in = [0, 1]
            elif i == 4:
                j_in = [0, 1, 2, 3]
            elif i == 5:
                j_in = [0, 1, 2]
            for j in j_in:
                if f'''layer_{i}.1.global_rep.{j}.''' in k:
                    k_new = k_new.replace(
                        f'''layer_{i}.1.global_rep.{j}.''' , f'''{model_prefix}encoder.layer.{i-1}.transformer.layer.{j}.''' )
                if f'''layer_{i}.1.global_rep.{j+1}.''' in k:
                    k_new = k_new.replace(
                        f'''layer_{i}.1.global_rep.{j+1}.''' , f'''{model_prefix}encoder.layer.{i-1}.layernorm.''' )
            if f'''layer_{i}.1.conv_proj.''' in k:
                k_new = k_new.replace(f'''layer_{i}.1.conv_proj.''' , f'''{model_prefix}encoder.layer.{i-1}.conv_projection.''' )
        if "pre_norm_attn.0." in k:
            k_new = k_new.replace("pre_norm_attn.0." , "layernorm_before." )
        if "pre_norm_attn.1." in k:
            k_new = k_new.replace("pre_norm_attn.1." , "attention." )
        if "pre_norm_ffn.0." in k:
            k_new = k_new.replace("pre_norm_ffn.0." , "layernorm_after." )
        if "pre_norm_ffn.1." in k:
            k_new = k_new.replace("pre_norm_ffn.1." , "ffn.conv1." )
        if "pre_norm_ffn.3." in k:
            k_new = k_new.replace("pre_norm_ffn.3." , "ffn.conv2." )
        if "classifier.1." in k:
            k_new = k_new.replace("classifier.1." , "classifier." )
        if "seg_head." in k:
            k_new = k_new.replace("seg_head." , "segmentation_head." )
        if ".aspp_layer." in k:
            k_new = k_new.replace(".aspp_layer." , "." )
        if ".aspp_pool." in k:
            k_new = k_new.replace(".aspp_pool." , "." )
        rename_keys.append((k, k_new) )
    return rename_keys
def remove_unused_keys( state_dict ):
    keys_to_ignore = []
    for k in state_dict.keys():
        if k.startswith("seg_head.aux_head." ):
            keys_to_ignore.append(k )
    for k in keys_to_ignore:
        state_dict.pop(k , None )
def prepare_img( ):
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    # url = "https://cdn.britannica.com/86/141086-050-9D7C75EE/Gulfstream-G450-business-jet-passengers.jpg"
    im = Image.open(requests.get(url , stream=True ).raw )
    return im
@torch.no_grad()
def convert_mobilevitv2_checkpoint( task_name , checkpoint_path , orig_config_path , pytorch_dump_folder_path ):
    config = get_mobilevitv2_config(task_name , orig_config_path )
    # load original state_dict
    checkpoint = torch.load(checkpoint_path , map_location="cpu" )
    # load huggingface model
    if task_name.startswith("ade20k_" ) or task_name.startswith("voc_" ):
        model = MobileViTV2ForSemanticSegmentation(config ).eval()
        base_model = False
    else:
        model = MobileViTV2ForImageClassification(config ).eval()
        base_model = False
    # remove and rename some keys of the original model
    state_dict = checkpoint
    remove_unused_keys(state_dict )
    rename_keys = create_rename_keys(state_dict , base_model=base_model )
    for rename_key_src, rename_key_dest in rename_keys:
        rename_key(state_dict , rename_key_src , rename_key_dest )
    # load modified state_dict
    model.load_state_dict(state_dict )
    # Check outputs on an image, prepared by MobileViTImageProcessor
    image_processor = MobileViTImageProcessor(crop_size=config.image_size , size=config.image_size + 32 )
    encoding = image_processor(images=prepare_img() , return_tensors="pt" )
    outputs = model(**encoding )
    # verify classification model
    if task_name.startswith("imagenet" ):
        logits = outputs.logits
        predicted_class_idx = logits.argmax(-1 ).item()
        print("Predicted class:" , model.config.id2label[predicted_class_idx] )
    if task_name.startswith("imagenet1k_256" ) and config.width_multiplier == 1.0:
        # expected_logits for base variant
        expected_logits = torch.tensor([-1.6_336e00, -7.3_204e-02, -5.1_883e-01] )
        assert torch.allclose(logits[0, :3] , expected_logits , atol=1e-4 )
    Path(pytorch_dump_folder_path ).mkdir(exist_ok=True )
    print(f'''Saving model {task_name} to {pytorch_dump_folder_path}''' )
    model.save_pretrained(pytorch_dump_folder_path )
    print(f'''Saving image processor to {pytorch_dump_folder_path}''' )
    image_processor.save_pretrained(pytorch_dump_folder_path )
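# Example invocation (an added sketch; the script filename and all paths below
# are placeholders, not values from this repository):
#   python convert_mobilevitv2_to_pytorch.py --task imagenet1k_256 \
#       --orig_checkpoint_path ./mobilevitv2-1.0.pt \
#       --orig_config_path ./mobilevitv2.yaml \
#       --pytorch_dump_folder_path ./mobilevitv2-1.0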
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--task""",
default="""imagenet1k_256""",
type=str,
help=(
"""Name of the task for which the MobileViTV2 model you'd like to convert is trained on . """
"""
Classification (ImageNet-1k)
- MobileViTV2 (256x256) : imagenet1k_256
- MobileViTV2 (Trained on 256x256 and Finetuned on 384x384) : imagenet1k_384
- MobileViTV2 (Trained on ImageNet-21k and Finetuned on ImageNet-1k 256x256) :
imagenet21k_to_1k_256
- MobileViTV2 (Trained on ImageNet-21k, Finetuned on ImageNet-1k 256x256, and Finetuned on
ImageNet-1k 384x384) : imagenet21k_to_1k_384
Segmentation
- ADE20K Dataset : ade20k_deeplabv3
- Pascal VOC 2012 Dataset: voc_deeplabv3
"""
),
choices=[
"""imagenet1k_256""",
"""imagenet1k_384""",
"""imagenet21k_to_1k_256""",
"""imagenet21k_to_1k_384""",
"""ade20k_deeplabv3""",
"""voc_deeplabv3""",
],
)
parser.add_argument(
"""--orig_checkpoint_path""", required=True, type=str, help="""Path to the original state dict (.pt file)."""
)
parser.add_argument("""--orig_config_path""", required=True, type=str, help="""Path to the original config file.""")
parser.add_argument(
"""--pytorch_dump_folder_path""", required=True, type=str, help="""Path to the output PyTorch model directory."""
)
    args = parser.parse_args()
    convert_mobilevitv2_checkpoint(
        args.task, args.orig_checkpoint_path, args.orig_config_path, args.pytorch_dump_folder_path
    )
| 201
|
def move_tower( height , from_pole , to_pole , with_pole ):
    if height >= 1:
        move_tower(height - 1 , from_pole , with_pole , to_pole )
        move_disk(from_pole , to_pole )
        move_tower(height - 1 , with_pole , to_pole , from_pole )


def move_disk( fp , tp ):
    print("moving disk from" , fp , "to" , tp )


def main( ):
    height = int(input("Height of hanoi: " ).strip() )
    move_tower(height , "A" , "B" , "C" )
if __name__ == "__main__":
main()
| 201
| 1
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
_import_structure = {
'''configuration_groupvit''': [
'''GROUPVIT_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''GroupViTConfig''',
'''GroupViTOnnxConfig''',
'''GroupViTTextConfig''',
'''GroupViTVisionConfig''',
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_groupvit'''] = [
'''GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''GroupViTModel''',
'''GroupViTPreTrainedModel''',
'''GroupViTTextModel''',
'''GroupViTVisionModel''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_tf_groupvit'''] = [
'''TF_GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFGroupViTModel''',
'''TFGroupViTPreTrainedModel''',
'''TFGroupViTTextModel''',
'''TFGroupViTVisionModel''',
]
if TYPE_CHECKING:
from .configuration_groupvit import (
GROUPVIT_PRETRAINED_CONFIG_ARCHIVE_MAP,
GroupViTConfig,
GroupViTOnnxConfig,
GroupViTTextConfig,
GroupViTVisionConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_groupvit import (
GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
GroupViTModel,
GroupViTPreTrainedModel,
GroupViTTextModel,
GroupViTVisionModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_groupvit import (
TF_GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFGroupViTModel,
TFGroupViTPreTrainedModel,
TFGroupViTTextModel,
TFGroupViTVisionModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 108
|
def find_minimum_change( denominations , value ) -> list[int]:
    total_value = int(value )
    # Initialize Result
    answer = []
    # Traverse through all denominations
    for denomination in reversed(denominations ):
        # Find denominations
        while int(total_value ) >= int(denomination ):
            total_value -= int(denomination )
            answer.append(denomination )  # Append the "answers" array
    return answer
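# Added sanity check (hypothetical helper, not in the original file) using the
# default Indian denominations from the driver code below.
def _find_minimum_change_demo() -> None:
    change = find_minimum_change([1, 2, 5, 10, 20, 50, 100, 500, 2000], 987)
    assert change == [500, 100, 100, 100, 100, 50, 20, 10, 5, 2]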
# Driver Code
if __name__ == "__main__":
    denominations = []
    value = '''0'''
if (
input('''Do you want to enter your denominations ? (yY/n): ''').strip().lower()
== "y"
):
        n = int(input('''Enter the number of denominations you want to add: ''').strip())
for i in range(0, n):
denominations.append(int(input(F"""Denomination {i}: """).strip()))
        value = input('''Enter the change you want to make in Indian Currency: ''').strip()
else:
# All denominations of Indian Currency if user does not enter
        denominations = [1, 2, 5, 10, 20, 50, 100, 500, 2000]
        value = input('''Enter the change you want to make: ''').strip()
if int(value) == 0 or int(value) < 0:
print('''The total value cannot be zero or negative.''')
else:
print(F"""Following is minimal change for {value}: """)
        answer = find_minimum_change(denominations, value)
# Print result
for i in range(len(answer)):
print(answer[i], end=''' ''')
| 280
| 0
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {'configuration_sew': ['SEW_PRETRAINED_CONFIG_ARCHIVE_MAP', 'SEWConfig']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_sew'] = [
'SEW_PRETRAINED_MODEL_ARCHIVE_LIST',
'SEWForCTC',
'SEWForSequenceClassification',
'SEWModel',
'SEWPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_sew import SEW_PRETRAINED_CONFIG_ARCHIVE_MAP, SEWConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_sew import (
SEW_PRETRAINED_MODEL_ARCHIVE_LIST,
SEWForCTC,
SEWForSequenceClassification,
SEWModel,
SEWPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 50
|
from manim import *
class Stage1( Scene ):
    def construct( self ) -> Any:
        """simple docstring"""
        mem = Rectangle(height=0.5 , width=0.5 )
        fill = Rectangle(height=0.46 , width=0.46 ).set_stroke(width=0 )
        cpu_left_col_base = [mem.copy() for i in range(6 )]
        cpu_right_col_base = [mem.copy() for i in range(6 )]
        cpu_left_col = VGroup(*cpu_left_col_base ).arrange(UP , buff=0 )
        cpu_right_col = VGroup(*cpu_right_col_base ).arrange(UP , buff=0 )
        cpu_rects = VGroup(cpu_left_col , cpu_right_col ).arrange(RIGHT , buff=0 )
        cpu_text = Text('CPU' , font_size=24 )
        cpu = Group(cpu_rects , cpu_text ).arrange(DOWN , buff=0.5 , aligned_edge=DOWN )
        cpu.move_to([-2.5, -0.5, 0] )
        self.add(cpu )
        gpu_base = [mem.copy() for i in range(1 )]
        gpu_rect = VGroup(*gpu_base ).arrange(UP , buff=0 )
        gpu_text = Text('GPU' , font_size=24 )
        gpu = Group(gpu_rect , gpu_text ).arrange(DOWN , buff=0.5 , aligned_edge=DOWN )
        gpu.align_to(cpu , DOWN )
        gpu.set_x(gpu.get_x() - 1 )
        self.add(gpu )
        model_base = [mem.copy() for i in range(6 )]
        model_rect = VGroup(*model_base ).arrange(RIGHT , buff=0 )
        model_text = Text('Model' , font_size=24 )
        model = Group(model_rect , model_text ).arrange(DOWN , buff=0.5 , aligned_edge=DOWN )
        model.move_to([3, -1.0, 0] )
        self.play(
            Create(cpu_left_col , run_time=1 ) , Create(cpu_right_col , run_time=1 ) , Create(gpu_rect , run_time=1 ) , )
        step_1 = MarkupText(
            f"First, an empty model skeleton is loaded\ninto <span fgcolor='{YELLOW}'>memory</span> without using much RAM." , font_size=24 , )
        key = Square(side_length=2.2 )
        key.move_to([-5, 2, 0] )
        key_text = MarkupText(
            f"<b>Key:</b>\n\n<span fgcolor='{YELLOW}'>●</span> Empty Model" , font_size=18 , )
        key_text.move_to([-5, 2.4, 0] )
        step_1.move_to([2, 2, 0] )
        self.play(Write(step_1 , run_time=2.5 ) , Write(key_text ) , Write(key ) )
        self.add(model )
        cpu_targs = []
        first_animations = []
        second_animations = []
        for i, rect in enumerate(model_base ):
            cpu_target = Rectangle(height=0.46 , width=0.46 ).set_stroke(width=0.0 ).set_fill(YELLOW , opacity=0.7 )
            cpu_target.move_to(rect )
            cpu_target.generate_target()
            cpu_target.target.height = 0.46 / 4
            cpu_target.target.width = 0.46 / 3
            if i == 0:
                cpu_target.target.next_to(cpu_left_col_base[0].get_corner(DOWN + LEFT ) , buff=0.02 , direction=UP )
                cpu_target.target.set_x(cpu_target.target.get_x() + 0.1 )
            elif i == 3:
                cpu_target.target.next_to(cpu_targs[0].target , direction=UP , buff=0.0 )
            else:
                cpu_target.target.next_to(cpu_targs[i - 1].target , direction=RIGHT , buff=0.0 )
            cpu_targs.append(cpu_target )
            first_animations.append(rect.animate(run_time=0.5 ).set_stroke(YELLOW ) )
            second_animations.append(MoveToTarget(cpu_target , run_time=1.5 ) )
        self.play(*first_animations )
        self.play(*second_animations )
        self.wait()
| 50
| 1
|
def get_data( source_data ) -> list[list[float]]:
    """simple docstring"""
    data_lists = []
    for data in source_data:
        for i, el in enumerate(data ):
            if len(data_lists ) < i + 1:
                data_lists.append([] )
            data_lists[i].append(float(el ) )
    return data_lists


def calculate_each_score( data_lists , weights ) -> list[list[float]]:
    """simple docstring"""
    score_lists = []
    for dlist, weight in zip(data_lists , weights ):
        mind = min(dlist )
        maxd = max(dlist )
        score = []
        # for weight 0 score is 1 - actual score
        if weight == 0:
            for item in dlist:
                try:
                    score.append(1 - ((item - mind) / (maxd - mind)) )
                except ZeroDivisionError:
                    score.append(1 )
        elif weight == 1:
            for item in dlist:
                try:
                    score.append((item - mind) / (maxd - mind) )
                except ZeroDivisionError:
                    score.append(0 )
        # weight not 0 or 1
        else:
            msg = f"""Invalid weight of {weight:f} provided"""
            raise ValueError(msg )
        score_lists.append(score )
    return score_lists


def generate_final_scores( score_lists ) -> list[float]:
    """simple docstring"""
    final_scores = [0 for i in range(len(score_lists[0] ) )]
    for slist in score_lists:
        for j, ele in enumerate(slist ):
            final_scores[j] = final_scores[j] + ele
    return final_scores


def procentual_proximity( source_data , weights ) -> list[list[float]]:
    """simple docstring"""
    data_lists = get_data(source_data )
    score_lists = calculate_each_score(data_lists , weights )
    final_scores = generate_final_scores(score_lists )
    # append scores to source data
    for i, ele in enumerate(final_scores ):
        source_data[i].append(ele )
    return source_data
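# Added sketch (hypothetical helper, not in the original file): three items
# scored over columns weighted 0 (lower is better) or 1 (higher is better);
# the total score is appended to every row.
def _procentual_proximity_demo() -> None:
    vehicles = [[20, 60, 2012], [23, 90, 2015], [22, 50, 2011]]
    scored = procentual_proximity(vehicles, [0, 0, 1])
    assert scored[0] == [20, 60, 2012, 2.0]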
| 14
|
import json
from typing import Dict, List, Optional, Tuple, Union
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding, EncodedInput
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import PaddingStrategy, logging
from .tokenization_led import LEDTokenizer
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {'''vocab_file''': '''vocab.json''', '''merges_file''': '''merges.txt''', '''tokenizer_file''': '''tokenizer.json'''}
PRETRAINED_VOCAB_FILES_MAP = {
'''vocab_file''': {
'''allenai/led-base-16384''': '''https://huggingface.co/allenai/led-base-16384/resolve/main/vocab.json''',
},
'''merges_file''': {
'''allenai/led-base-16384''': '''https://huggingface.co/allenai/led-base-16384/resolve/main/merges.txt''',
},
'''tokenizer_file''': {
'''allenai/led-base-16384''': '''https://huggingface.co/allenai/led-base-16384/resolve/main/tokenizer.json''',
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'''allenai/led-base-16384''': 16_384,
}
class a_ ( PreTrainedTokenizerFast ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : str = VOCAB_FILES_NAMES
__SCREAMING_SNAKE_CASE : Tuple = PRETRAINED_VOCAB_FILES_MAP
__SCREAMING_SNAKE_CASE : List[str] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__SCREAMING_SNAKE_CASE : Union[str, Any] = LEDTokenizer
__SCREAMING_SNAKE_CASE : Optional[int] = ['input_ids', 'attention_mask']
    def __init__( self , vocab_file=None , merges_file=None , tokenizer_file=None , errors="replace" , bos_token="<s>" , eos_token="</s>" , sep_token="</s>" , cls_token="<s>" , unk_token="<unk>" , pad_token="<pad>" , mask_token="<mask>" , add_prefix_space=False , trim_offsets=True , **kwargs , ) ->Union[str, Any]:
        super().__init__(
            vocab_file , merges_file , tokenizer_file=tokenizer_file , errors=errors , bos_token=bos_token , eos_token=eos_token , sep_token=sep_token , cls_token=cls_token , unk_token=unk_token , pad_token=pad_token , mask_token=mask_token , add_prefix_space=add_prefix_space , trim_offsets=trim_offsets , **kwargs , )
        pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
        if pre_tok_state.get('''add_prefix_space''' , add_prefix_space ) != add_prefix_space:
            pre_tok_class = getattr(pre_tokenizers , pre_tok_state.pop('''type''' ) )
            pre_tok_state['''add_prefix_space'''] = add_prefix_space
            self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state )
        self.add_prefix_space = add_prefix_space
        # the pre_tokenizer is already updated in the GPT2TokenizerFast `__init__`
        tokenizer_component = '''post_processor'''
        tokenizer_component_instance = getattr(self.backend_tokenizer , tokenizer_component , None )
        if tokenizer_component_instance:
            state = json.loads(tokenizer_component_instance.__getstate__() )
            # The lists 'sep' and 'cls' must be cast to tuples for the object `post_processor_class`
            if "sep" in state:
                state['''sep'''] = tuple(state['''sep'''] )
            if "cls" in state:
                state['''cls'''] = tuple(state['''cls'''] )
            changes_to_apply = False
            if state.get('''add_prefix_space''' , add_prefix_space ) != add_prefix_space:
                state['''add_prefix_space'''] = add_prefix_space
                changes_to_apply = True
            if state.get('''trim_offsets''' , trim_offsets ) != trim_offsets:
                state['''trim_offsets'''] = trim_offsets
                changes_to_apply = True
            if changes_to_apply:
                component_class = getattr(processors , state.pop('''type''' ) )
                new_value = component_class(**state )
                setattr(self.backend_tokenizer , tokenizer_component , new_value )
    @property
    # Copied from transformers.models.bart.tokenization_bart_fast.BartTokenizerFast.mask_token with BART->LED
    def mask_token( self ) ->str:
        if self._mask_token is None:
            if self.verbose:
                logger.error('''Using mask_token, but it is not set yet.''' )
            return None
        return str(self._mask_token )

    @mask_token.setter
    def mask_token( self , value ) ->List[Any]:
        value = AddedToken(value , lstrip=True , rstrip=False ) if isinstance(value , str ) else value
        self._mask_token = value
    def _batch_encode_plus( self , *args , **kwargs ) ->BatchEncoding:
        is_split_into_words = kwargs.get('''is_split_into_words''' , False )
        if is_split_into_words and not self.add_prefix_space:
            raise ValueError(
                F"""You need to instantiate {self.__class__.__name__} with add_prefix_space=True """
                '''to use it with pretokenized inputs.''' )
        return super()._batch_encode_plus(*args , **kwargs )

    def _encode_plus( self , *args , **kwargs ) ->BatchEncoding:
        is_split_into_words = kwargs.get('''is_split_into_words''' , False )
        if is_split_into_words and not self.add_prefix_space:
            raise ValueError(
                F"""You need to instantiate {self.__class__.__name__} with add_prefix_space=True """
                '''to use it with pretokenized inputs.''' )
        return super()._encode_plus(*args , **kwargs )

    def save_vocabulary( self , save_directory , filename_prefix = None ) ->Tuple[str]:
        files = self._tokenizer.model.save(save_directory , name=filename_prefix )
        return tuple(files )
    def build_inputs_with_special_tokens( self , token_ids_0 , token_ids_1=None ) ->Any:
        output = [self.bos_token_id] + token_ids_0 + [self.eos_token_id]
        if token_ids_1 is None:
            return output
        return output + [self.eos_token_id] + token_ids_1 + [self.eos_token_id]

    def create_token_type_ids_from_sequences( self , token_ids_0 , token_ids_1 = None ) ->List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep ) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep ) * [0]
    def _pad( self , encoded_inputs , max_length = None , padding_strategy = PaddingStrategy.DO_NOT_PAD , pad_to_multiple_of = None , return_attention_mask = None , ) ->dict:
        encoded_inputs = super()._pad(
            encoded_inputs=encoded_inputs , max_length=max_length , padding_strategy=padding_strategy , pad_to_multiple_of=pad_to_multiple_of , return_attention_mask=return_attention_mask , )
        # Load from model defaults
        if return_attention_mask is None:
            return_attention_mask = '''attention_mask''' in self.model_input_names
        if return_attention_mask and "global_attention_mask" in encoded_inputs:
            required_input = encoded_inputs[self.model_input_names[0]]
            # `global_attention_mask` needs to have the same length as other (sequential) inputs.
            needs_to_be_padded = len(encoded_inputs['''global_attention_mask'''] ) != len(required_input )
            if needs_to_be_padded:
                difference = len(required_input ) - len(encoded_inputs['''global_attention_mask'''] )
                if self.padding_side == "right":
                    # Use `-1` since `0` in `global_attention_mask` means `local attention` instead of `not to attend`
                    encoded_inputs['''global_attention_mask'''] = (
                        encoded_inputs['''global_attention_mask'''] + [-1] * difference
                    )
                elif self.padding_side == "left":
                    encoded_inputs['''global_attention_mask'''] = [-1] * difference + encoded_inputs[
                        '''global_attention_mask'''
                    ]
                else:
                    raise ValueError('''Invalid padding strategy:''' + str(self.padding_side ) )
return encoded_inputs
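# Added illustration (standalone sketch, independent of the class above) of the
# `-1` right-padding convention for `global_attention_mask`: since 0 already
# means "local attention" and 1 means "global attention", padded positions get -1.
def _pad_global_attention_mask(mask: list, target_len: int) -> list:
    return mask + [-1] * (target_len - len(mask))
# _pad_global_attention_mask([1, 0, 0], 5) -> [1, 0, 0, -1, -1]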
| 313
| 0
|
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import Features, Sequence, Value
from .base import TaskTemplate
@dataclass(frozen=True )
class __snake_case ( TaskTemplate ):
    # `task` is not a ClassVar since we want it to be part of the `asdict` output for JSON serialization
    task: str = field(default="question-answering-extractive" , metadata={"include_in_asdict_even_if_is_default": True} )
    input_schema: ClassVar[Features] = Features({"question": Value("string" ), "context": Value("string" )} )
    label_schema: ClassVar[Features] = Features(
        {
            "answers": Sequence(
                {
                    "text": Value("string" ),
                    "answer_start": Value("int32" ),
                } )
        } )
    question_column: str = "question"
    context_column: str = "context"
    answers_column: str = "answers"
@property
    def column_mapping( self ) -> Dict[str, str]:
"""simple docstring"""
return {self.question_column: "question", self.context_column: "context", self.answers_column: "answers"}
| 204
|
from __future__ import annotations
arr = [-10, -5, 0, 5, 5.1, 11, 13, 21, 3, 4, -21, -10, -5, -1, 0]
expect = [-5, 0, 5, 5.1, 11, 13, 21, -1, 4, -1, -10, -5, -1, 0, -1]
def next_greatest_element_slow(arr : list[float] ) -> list[float]:
    """simple docstring"""
    result = []
    arr_size = len(arr )
    for i in range(arr_size ):
        next_element: float = -1
        for j in range(i + 1 , arr_size ):
            if arr[i] < arr[j]:
                next_element = arr[j]
                break
        result.append(next_element )
    return result
def next_greatest_element_fast(arr : list[float] ) -> list[float]:
    """simple docstring"""
    result = []
    for i, outer in enumerate(arr ):
        next_element: float = -1
        for inner in arr[i + 1 :]:
            if outer < inner:
                next_element = inner
                break
        result.append(next_element )
    return result
def next_greatest_element(arr : list[float] ) -> list[float]:
    """simple docstring"""
    arr_size = len(arr )
    stack: list[float] = []
    result: list[float] = [-1] * arr_size
    for index in reversed(range(arr_size ) ):
        if stack:
            while stack[-1] <= arr[index]:
                stack.pop()
                if not stack:
                    break
        if stack:
            result[index] = stack[-1]
        stack.append(arr[index] )
    return result
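# Added sketch (hypothetical helper, not in the original file): all three
# implementations agree on a small input.
def _next_greatest_element_demo() -> None:
    sample = [2, 1, 5]
    expected = [5, 5, -1]
    assert next_greatest_element_slow(sample) == expected
    assert next_greatest_element_fast(sample) == expected
    assert next_greatest_element(sample) == expected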
if __name__ == "__main__":
from doctest import testmod
from timeit import timeit
testmod()
print(next_greatest_element_slow(arr))
print(next_greatest_element_fast(arr))
print(next_greatest_element(arr))
    setup = (
'''from __main__ import arr, next_greatest_element_slow, '''
'''next_greatest_element_fast, next_greatest_element'''
)
print(
'''next_greatest_element_slow():''',
timeit('''next_greatest_element_slow(arr)''', setup=setup),
)
print(
'''next_greatest_element_fast():''',
timeit('''next_greatest_element_fast(arr)''', setup=setup),
)
print(
''' next_greatest_element():''',
timeit('''next_greatest_element(arr)''', setup=setup),
)
| 204
| 1
|
'''simple docstring'''
import subprocess
import sys
from transformers import BertConfig, BertModel, BertTokenizer, pipeline
from transformers.testing_utils import TestCasePlus, require_torch
class __magic_name__ ( TestCasePlus ):
    @require_torch
    def test_offline_mode( self ):
        # this test is a bit tricky since TRANSFORMERS_OFFLINE can only be changed before
        # `transformers` is loaded, and it's too late for inside pytest - so we are changing it
        # while running an external program
        # python one-liner segments
        # this must be loaded before socket.socket is monkey-patched
        load = '\nfrom transformers import BertConfig, BertModel, BertTokenizer, pipeline\n '
        run = '\nmname = "hf-internal-testing/tiny-random-bert"\nBertConfig.from_pretrained(mname)\nBertModel.from_pretrained(mname)\nBertTokenizer.from_pretrained(mname)\npipe = pipeline(task="fill-mask", model=mname)\nprint("success")\n '
        mock = '\nimport socket\ndef offline_socket(*args, **kwargs): raise RuntimeError("Offline mode is enabled, we shouldn\'t access internet")\nsocket.socket = offline_socket\n '
        # Force fetching the files so that we can use the cache
        mname = 'hf-internal-testing/tiny-random-bert'
        BertConfig.from_pretrained(mname )
        BertModel.from_pretrained(mname )
        BertTokenizer.from_pretrained(mname )
        pipeline(task='fill-mask' ,model=mname )
        # baseline - just load from_pretrained with normal network
        cmd = [sys.executable, '-c', '\n'.join([load, run, mock] )]
        # should succeed
        env = self.get_env()
        # should succeed as TRANSFORMERS_OFFLINE=1 tells it to use local files
        env['TRANSFORMERS_OFFLINE'] = '1'
        result = subprocess.run(cmd ,env=env ,check=False ,capture_output=True )
        self.assertEqual(result.returncode ,0 ,result.stderr )
        self.assertIn('success' ,result.stdout.decode() )
    @require_torch
    def test_offline_mode_no_internet( self ):
        # python one-liner segments
        # this must be loaded before socket.socket is monkey-patched
        load = '\nfrom transformers import BertConfig, BertModel, BertTokenizer, pipeline\n '
        run = '\nmname = "hf-internal-testing/tiny-random-bert"\nBertConfig.from_pretrained(mname)\nBertModel.from_pretrained(mname)\nBertTokenizer.from_pretrained(mname)\npipe = pipeline(task="fill-mask", model=mname)\nprint("success")\n '
        mock = '\nimport socket\ndef offline_socket(*args, **kwargs): raise socket.error("Faking flaky internet")\nsocket.socket = offline_socket\n '
        # Force fetching the files so that we can use the cache
        mname = 'hf-internal-testing/tiny-random-bert'
        BertConfig.from_pretrained(mname )
        BertModel.from_pretrained(mname )
        BertTokenizer.from_pretrained(mname )
        pipeline(task='fill-mask' ,model=mname )
        # baseline - just load from_pretrained with normal network
        cmd = [sys.executable, '-c', '\n'.join([load, run, mock] )]
        # should succeed
        env = self.get_env()
        result = subprocess.run(cmd ,env=env ,check=False ,capture_output=True )
        self.assertEqual(result.returncode ,0 ,result.stderr )
        self.assertIn('success' ,result.stdout.decode() )
    @require_torch
    def test_offline_mode_sharded_checkpoint( self ):
        # this test is a bit tricky since TRANSFORMERS_OFFLINE can only be changed before
        # `transformers` is loaded, and it's too late for inside pytest - so we are changing it
        # while running an external program
        # python one-liner segments
        # this must be loaded before socket.socket is monkey-patched
        load = '\nfrom transformers import BertConfig, BertModel, BertTokenizer\n '
        run = '\nmname = "hf-internal-testing/tiny-random-bert-sharded"\nBertConfig.from_pretrained(mname)\nBertModel.from_pretrained(mname)\nprint("success")\n '
        mock = '\nimport socket\ndef offline_socket(*args, **kwargs): raise ValueError("Offline mode is enabled")\nsocket.socket = offline_socket\n '
        # baseline - just load from_pretrained with normal network
        cmd = [sys.executable, '-c', '\n'.join([load, run] )]
        # should succeed
        env = self.get_env()
        result = subprocess.run(cmd ,env=env ,check=False ,capture_output=True )
        self.assertEqual(result.returncode ,0 ,result.stderr )
        self.assertIn('success' ,result.stdout.decode() )
        # next emulate no network
        cmd = [sys.executable, '-c', '\n'.join([load, mock, run] )]
        # Doesn't fail anymore since the model is in the cache due to other tests, so commenting this.
        # env["TRANSFORMERS_OFFLINE"] = "0"
        # result = subprocess.run(cmd, env=env, check=False, capture_output=True)
        # self.assertEqual(result.returncode, 1, result.stderr)
        # should succeed as TRANSFORMERS_OFFLINE=1 tells it to use local files
        env['TRANSFORMERS_OFFLINE'] = '1'
        result = subprocess.run(cmd ,env=env ,check=False ,capture_output=True )
        self.assertEqual(result.returncode ,0 ,result.stderr )
        self.assertIn('success' ,result.stdout.decode() )
    @require_torch
    def test_offline_mode_pipeline_exception( self ):
        load = '\nfrom transformers import pipeline\n '
        run = '\nmname = "hf-internal-testing/tiny-random-bert"\npipe = pipeline(model=mname)\n '
        mock = '\nimport socket\ndef offline_socket(*args, **kwargs): raise socket.error("Offline mode is enabled")\nsocket.socket = offline_socket\n '
        env = self.get_env()
        env['TRANSFORMERS_OFFLINE'] = '1'
        cmd = [sys.executable, '-c', '\n'.join([load, mock, run] )]
        result = subprocess.run(cmd ,env=env ,check=False ,capture_output=True )
        self.assertEqual(result.returncode ,1 ,result.stderr )
        self.assertIn(
            'You cannot infer task automatically within `pipeline` when using offline mode' ,result.stderr.decode().replace('\n' ,'' ) ,)
    @require_torch
    def test_offline_model_dynamic_model( self ):
        load = '\nfrom transformers import AutoModel\n '
        run = '\nmname = "hf-internal-testing/test_dynamic_model"\nAutoModel.from_pretrained(mname, trust_remote_code=True)\nprint("success")\n '
        # baseline - just load from_pretrained with normal network
        cmd = [sys.executable, '-c', '\n'.join([load, run] )]
        # should succeed
        env = self.get_env()
        result = subprocess.run(cmd ,env=env ,check=False ,capture_output=True )
        self.assertEqual(result.returncode ,0 ,result.stderr )
        self.assertIn('success' ,result.stdout.decode() )
        # should succeed as TRANSFORMERS_OFFLINE=1 tells it to use local files
        env['TRANSFORMERS_OFFLINE'] = '1'
        result = subprocess.run(cmd ,env=env ,check=False ,capture_output=True )
        self.assertEqual(result.returncode ,0 ,result.stderr )
        self.assertIn('success' ,result.stdout.decode() )
| 89
|
'''simple docstring'''
import math
def check_partition_perfect( positive_integer ) -> bool:
    exponent = math.log2(math.sqrt(4 * positive_integer + 1 ) / 2 + 1 / 2 )
    return exponent == int(exponent )


def solution( max_proportion = 1 / 12345 ) -> int:
    total_partitions = 0
    perfect_partitions = 0
    integer = 3
    while True:
        partition_candidate = (integer**2 - 1) / 4
        # if candidate is an integer, then there is a partition for k
        if partition_candidate == int(partition_candidate ):
            partition_candidate = int(partition_candidate )
            total_partitions += 1
            if check_partition_perfect(partition_candidate ):
                perfect_partitions += 1
        if perfect_partitions > 0:
            if perfect_partitions / total_partitions < max_proportion:
                return int(partition_candidate )
        integer += 1
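# Added sketch (hypothetical helper, not in the original file): 2 and 12 are
# perfect partitions because sqrt(4 * 2 + 1) / 2 + 1 / 2 == 2 (log2 == 1) and
# sqrt(4 * 12 + 1) / 2 + 1 / 2 == 4 (log2 == 2).
def _check_partition_perfect_demo() -> None:
    assert check_partition_perfect(2)
    assert check_partition_perfect(12)
    assert not check_partition_perfect(3)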
if __name__ == "__main__":
print(f"""{solution() = }""")
| 89
| 1
|
import itertools
from dataclasses import dataclass
from typing import Any, Callable, Dict, List, Optional, Union
import pandas as pd
import pyarrow as pa
import datasets
import datasets.config
from datasets.features.features import require_storage_cast
from datasets.table import table_cast
from datasets.utils.py_utils import Literal
logger = datasets.utils.logging.get_logger(__name__)
_PANDAS_READ_CSV_NO_DEFAULT_PARAMETERS = ['''names''', '''prefix''']
_PANDAS_READ_CSV_DEPRECATED_PARAMETERS = ['''warn_bad_lines''', '''error_bad_lines''', '''mangle_dupe_cols''']
_PANDAS_READ_CSV_NEW_1_3_0_PARAMETERS = ['''encoding_errors''', '''on_bad_lines''']
_PANDAS_READ_CSV_NEW_2_0_0_PARAMETERS = ['''date_format''']
@dataclass
class CsvConfig( datasets.BuilderConfig):
"""simple docstring"""
a__ : str = ","
a__ : Optional[str] = None
a__ : Optional[Union[int, List[int], str]] = "infer"
a__ : Optional[List[str]] = None
a__ : Optional[List[str]] = None
a__ : Optional[Union[int, str, List[int], List[str]]] = None
a__ : Optional[Union[List[int], List[str]]] = None
a__ : Optional[str] = None
a__ : bool = True
a__ : Optional[Literal["c", "python", "pyarrow"]] = None
a__ : Dict[Union[int, str], Callable[[Any], Any]] = None
a__ : Optional[list] = None
a__ : Optional[list] = None
a__ : bool = False
a__ : Optional[Union[int, List[int]]] = None
a__ : Optional[int] = None
a__ : Optional[Union[str, List[str]]] = None
a__ : bool = True
a__ : bool = True
a__ : bool = False
a__ : bool = True
a__ : Optional[str] = None
a__ : str = "."
a__ : Optional[str] = None
a__ : str = '"'
a__ : int = 0
a__ : Optional[str] = None
a__ : Optional[str] = None
a__ : Optional[str] = None
a__ : Optional[str] = None
a__ : bool = True
a__ : bool = True
a__ : int = 0
a__ : bool = True
a__ : bool = False
a__ : Optional[str] = None
a__ : int = 1_0000
a__ : Optional[datasets.Features] = None
a__ : Optional[str] = "strict"
a__ : Literal["error", "warn", "skip"] = "error"
a__ : Optional[str] = None
    def __post_init__(self ):
        if self.delimiter is not None:
            self.sep = self.delimiter
        if self.column_names is not None:
            self.names = self.column_names
    @property
    def pd_read_csv_kwargs(self ):
        pd_read_csv_kwargs = {
"sep": self.sep,
"header": self.header,
"names": self.names,
"index_col": self.index_col,
"usecols": self.usecols,
"prefix": self.prefix,
"mangle_dupe_cols": self.mangle_dupe_cols,
"engine": self.engine,
"converters": self.converters,
"true_values": self.true_values,
"false_values": self.false_values,
"skipinitialspace": self.skipinitialspace,
"skiprows": self.skiprows,
"nrows": self.nrows,
"na_values": self.na_values,
"keep_default_na": self.keep_default_na,
"na_filter": self.na_filter,
"verbose": self.verbose,
"skip_blank_lines": self.skip_blank_lines,
"thousands": self.thousands,
"decimal": self.decimal,
"lineterminator": self.lineterminator,
"quotechar": self.quotechar,
"quoting": self.quoting,
"escapechar": self.escapechar,
"comment": self.comment,
"encoding": self.encoding,
"dialect": self.dialect,
"error_bad_lines": self.error_bad_lines,
"warn_bad_lines": self.warn_bad_lines,
"skipfooter": self.skipfooter,
"doublequote": self.doublequote,
"memory_map": self.memory_map,
"float_precision": self.float_precision,
"chunksize": self.chunksize,
"encoding_errors": self.encoding_errors,
"on_bad_lines": self.on_bad_lines,
"date_format": self.date_format,
}
# some kwargs must not be passed if they don't have a default value
# some others are deprecated and we can also not pass them if they are the default value
for pd_read_csv_parameter in _PANDAS_READ_CSV_NO_DEFAULT_PARAMETERS + _PANDAS_READ_CSV_DEPRECATED_PARAMETERS:
            if pd_read_csv_kwargs[pd_read_csv_parameter] == getattr(CsvConfig() , pd_read_csv_parameter ):
del pd_read_csv_kwargs[pd_read_csv_parameter]
# Remove 2.0 new arguments
if not (datasets.config.PANDAS_VERSION.major >= 2):
for pd_read_csv_parameter in _PANDAS_READ_CSV_NEW_2_0_0_PARAMETERS:
del pd_read_csv_kwargs[pd_read_csv_parameter]
# Remove 1.3 new arguments
if not (datasets.config.PANDAS_VERSION.major >= 1 and datasets.config.PANDAS_VERSION.minor >= 3):
for pd_read_csv_parameter in _PANDAS_READ_CSV_NEW_1_3_0_PARAMETERS:
del pd_read_csv_kwargs[pd_read_csv_parameter]
return pd_read_csv_kwargs
class Csv( datasets.ArrowBasedBuilder):
    """simple docstring"""
    BUILDER_CONFIG_CLASS = CsvConfig
    def _info(self ) -> Optional[int]:
return datasets.DatasetInfo(features=self.config.features )
    def _split_generators(self , dl_manager ) -> List[str]:
        if not self.config.data_files:
            raise ValueError(F"""At least one data file must be specified, but got data_files={self.config.data_files}""" )
        data_files = dl_manager.download_and_extract(self.config.data_files )
        if isinstance(data_files , (str, list, tuple) ):
            files = data_files
            if isinstance(files , str ):
                files = [files]
            files = [dl_manager.iter_files(file ) for file in files]
            return [datasets.SplitGenerator(name=datasets.Split.TRAIN , gen_kwargs={"""files""": files} )]
        splits = []
        for split_name, files in data_files.items():
            if isinstance(files , str ):
                files = [files]
            files = [dl_manager.iter_files(file ) for file in files]
            splits.append(datasets.SplitGenerator(name=split_name , gen_kwargs={"""files""": files} ) )
        return splits
    def _cast_table(self , pa_table: pa.Table ) -> pa.Table:
        if self.config.features is not None:
            schema = self.config.features.arrow_schema
            if all(not require_storage_cast(feature ) for feature in self.config.features.values() ):
                # cheaper cast
                pa_table = pa.Table.from_arrays([pa_table[field.name] for field in schema] , schema=schema )
            else:
                # more expensive cast; allows str <-> int/float or str to Audio for example
                pa_table = table_cast(pa_table , schema )
        return pa_table
    def _generate_tables(self , files ) -> Dict:
        schema = self.config.features.arrow_schema if self.config.features else None
        # dtype allows reading an int column as str
        dtype = (
            {
                name: dtype.to_pandas_dtype() if not require_storage_cast(feature ) else object
                for name, dtype, feature in zip(schema.names , schema.types , self.config.features.values() )
            }
            if schema is not None
            else None
        )
        for file_idx, file in enumerate(itertools.chain.from_iterable(files ) ):
            csv_file_reader = pd.read_csv(file , iterator=True , dtype=dtype , **self.config.pd_read_csv_kwargs )
            try:
                for batch_idx, df in enumerate(csv_file_reader ):
                    pa_table = pa.Table.from_pandas(df )
                    # Uncomment for debugging (will print the Arrow table size and elements)
                    # logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}")
                    # logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows)))
                    yield (file_idx, batch_idx), self._cast_table(pa_table )
            except ValueError as e:
                logger.error(F"""Failed to read file '{file}' with error {type(e )}: {e}""" )
                raise
| 367
|
def get_demo_graph( index : int ) -> dict:
'''simple docstring'''
return [
{
0: [1, 2],
1: [0, 2],
2: [0, 1, 3, 5],
3: [2, 4],
4: [3],
5: [2, 6, 8],
6: [5, 7],
7: [6, 8],
8: [5, 7],
},
{
0: [6],
1: [9],
2: [4, 5],
3: [4],
4: [2, 3],
5: [2],
6: [0, 7],
7: [6],
8: [],
9: [1],
},
{
0: [4],
1: [6],
2: [],
3: [5, 6, 7],
4: [0, 6],
5: [3, 8, 9],
6: [1, 3, 4, 7],
7: [3, 6, 8, 9],
8: [5, 7],
9: [5, 7],
},
{
0: [1, 3],
1: [0, 2, 4],
2: [1, 3, 4],
3: [0, 2, 4],
4: [1, 2, 3],
},
][index]
def compute_bridges( graph : dict[int, list[int]] ) -> list[tuple[int, int]]:
    '''simple docstring'''
    id_ = 0
    n = len(graph )  # No of vertices in graph
    low = [0] * n
    visited = [False] * n

    def dfs( at , parent , bridges , id_ ):
        visited[at] = True
        low[at] = id_
        id_ += 1
        for to in graph[at]:
            if to == parent:
                pass
            elif not visited[to]:
                dfs(to , at , bridges , id_ )
                low[at] = min(low[at] , low[to] )
                if id_ <= low[to]:
                    bridges.append((at, to) if at < to else (to, at) )
            else:
                # This edge is a back edge and cannot be a bridge
                low[at] = min(low[at] , low[to] )

    bridges: list[tuple[int, int]] = []
    for i in range(n ):
        if not visited[i]:
            dfs(i , -1 , bridges , id_ )
    return bridges
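# Added sketch (hypothetical helper, not in the original file): the first demo
# graph has exactly three bridges.
def _compute_bridges_demo() -> None:
    assert sorted(compute_bridges(get_demo_graph(0))) == [(2, 3), (2, 5), (3, 4)]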
if __name__ == "__main__":
import doctest
doctest.testmod()
| 277
| 0
|
def sum_of_series( first_term , common_diff , num_of_terms ) -> float:
    """simple docstring"""
    total = (num_of_terms / 2) * (2 * first_term + (num_of_terms - 1) * common_diff)
    # formula for sum of series
    return total
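# Added worked example (hypothetical helper, not in the original file):
# 1 + 2 + ... + 10 = (10 / 2) * (2 * 1 + 9 * 1) = 5 * 11 = 55.
def _sum_of_series_demo() -> None:
    assert sum_of_series(1, 1, 10) == 55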
def main() -> None:
    """simple docstring"""
    print(sum_of_series(1 , 1 , 10 ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 10
|
import os
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_doctest_list.py
__A = "."
if __name__ == "__main__":
__A = os.path.join(REPO_PATH, "utils/documentation_tests.txt")
__A = []
__A = []
with open(doctest_file_path) as fp:
for line in fp:
__A = line.strip()
__A = os.path.join(REPO_PATH, line)
if not (os.path.isfile(path) or os.path.isdir(path)):
non_existent_paths.append(line)
all_paths.append(path)
if len(non_existent_paths) > 0:
__A = "\n".join(non_existent_paths)
raise ValueError(f'`utils/documentation_tests.txt` contains non-existent paths:\n{non_existent_paths}')
if all_paths != sorted(all_paths):
raise ValueError("Files in `utils/documentation_tests.txt` are not in alphabetical order.")
| 10
| 1
|
import math_equivalence # From: git+https://github.com/hendrycks/math.git
import datasets
UpperCAmelCase__ = "\\n@article{hendrycksmath2021,\n title={Measuring Mathematical Problem Solving With the MATH Dataset},\n author={Dan Hendrycks\n and Collin Burns\n and Saurav Kadavath\n and Akul Arora\n and Steven Basart\n and Eric Tang\n and Dawn Song\n and Jacob Steinhardt},\n journal={arXiv preprint arXiv:2103.03874},\n year={2021}\n}\n"
UpperCAmelCase__ = "\\nThis metric is used to assess performance on the Mathematics Aptitude Test of Heuristics (MATH) dataset.\nIt first canonicalizes the inputs (e.g., converting \"1/2\" to \"\\frac{1}{2}\") and then computes accuracy.\n"
UpperCAmelCase__ = r"\nCalculates accuracy after canonicalizing inputs.\n\nArgs:\n predictions: list of predictions to score. Each prediction\n is a string that contains natural language and LaTex.\n references: list of reference for each prediction. Each\n reference is a string that contains natural language\n and LaTex.\nReturns:\n accuracy: accuracy after canonicalizing inputs\n (e.g., converting \"1/2\" to \"\\frac{1}{2}\")\n\nExamples:\n >>> metric = datasets.load_metric(\"competition_math\")\n >>> results = metric.compute(references=[\"\\frac{1}{2}\"], predictions=[\"1/2\"])\n >>> print(results)\n {'accuracy': 1.0}\n"
@datasets.utils.file_utils.add_end_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class __lowerCAmelCase ( datasets.Metric ):
    def _info( self ) -> int:
"""simple docstring"""
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'predictions': datasets.Value('string'),
'references': datasets.Value('string'),
}) , homepage='https://github.com/hendrycks/math' , codebase_urls=['https://github.com/hendrycks/math'] , )
    def _compute( self , predictions , references ) -> Optional[int]:
        """simple docstring"""
        n_correct = 0.0
        for i, j in zip(predictions , references ):
            n_correct += 1.0 if math_equivalence.is_equiv(i , j ) else 0.0
        accuracy = n_correct / len(predictions )
        return {
            "accuracy": accuracy,
        }
| 290
|
demo_graph = {
"A": ["B", "C", "E"],
"B": ["A", "D", "E"],
"C": ["A", "F", "G"],
"D": ["B"],
"E": ["A", "B", "D"],
"F": ["C"],
"G": ["C"],
}
def bfs_shortest_path( graph : dict , start , goal ) -> list[str]:
    '''simple docstring'''
    explored = set()
    # keep track of all the paths to be checked
    queue = [[start]]
    # return path if start is goal
    if start == goal:
        return [start]
    # keeps looping until all possible paths have been checked
    while queue:
        # pop the first path from the queue
        path = queue.pop(0 )
        # get the last node from the path
        node = path[-1]
        if node not in explored:
            neighbours = graph[node]
            # go through all neighbour nodes, construct a new path and
            # push it into the queue
            for neighbour in neighbours:
                new_path = list(path )
                new_path.append(neighbour )
                queue.append(new_path )
                # return path if neighbour is goal
                if neighbour == goal:
                    return new_path
            # mark node as explored
            explored.add(node )
    # in case there's no path between the 2 nodes
    return []
def bfs_shortest_path_distance( graph : dict , start , target ) -> int:
    '''simple docstring'''
    if not graph or start not in graph or target not in graph:
        return -1
    if start == target:
        return 0
    queue = [start]
    visited = set(start )
    # Keep tab on distances from `start` node.
    dist = {start: 0, target: -1}
    while queue:
        node = queue.pop(0 )
        if node == target:
            dist[target] = (
                dist[node] if dist[target] == -1 else min(dist[target] , dist[node] )
            )
        for adjacent in graph[node]:
            if adjacent not in visited:
                visited.add(adjacent )
                queue.append(adjacent )
                dist[adjacent] = dist[node] + 1
    return dist[target]
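# Added sketch (hypothetical helper, not in the original file) mirroring the
# printed examples in the __main__ block below.
def _bfs_demo() -> None:
    assert bfs_shortest_path(demo_graph, "G", "D") == ["G", "C", "A", "B", "D"]
    assert bfs_shortest_path_distance(demo_graph, "G", "D") == 4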
if __name__ == "__main__":
print(bfs_shortest_path(demo_graph, "G", "D")) # returns ['G', 'C', 'A', 'B', 'D']
print(bfs_shortest_path_distance(demo_graph, "G", "D")) # returns 4
| 290
| 1
|
import pickle
import unittest
import torch
from accelerate import Accelerator
from accelerate.state import AcceleratorState
from accelerate.test_utils import require_cpu
@require_cpu
class _A ( unittest.TestCase ):
    def test_accelerated_optimizer_pickling( self ):
        '''simple docstring'''
        model = torch.nn.Linear(10 , 10)
        optimizer = torch.optim.SGD(model.parameters() , 0.1)
        accelerator = Accelerator()
        optimizer = accelerator.prepare(optimizer)
        try:
            pickle.loads(pickle.dumps(optimizer))
except Exception as e:
self.fail(F'Accelerated optimizer pickling failed with {e}')
AcceleratorState._reset_state()
| 49
|
"""simple docstring"""
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)
SPIECE_UNDERLINE = """▁"""
VOCAB_FILES_NAMES = {"""vocab_file""": """sentencepiece.bpe.model"""}
PRETRAINED_VOCAB_FILES_MAP = {
"""vocab_file""": {
"""facebook/xglm-564M""": """https://huggingface.co/facebook/xglm-564M/resolve/main/sentencepiece.bpe.model""",
}
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"""facebook/xglm-564M""": 2048,
}
class lowerCAmelCase__ ( PreTrainedTokenizer ):
'''simple docstring'''
__UpperCamelCase = VOCAB_FILES_NAMES
__UpperCamelCase = PRETRAINED_VOCAB_FILES_MAP
__UpperCamelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__UpperCamelCase = ["input_ids", "attention_mask"]
    def __init__( self , vocab_file , bos_token="<s>" , eos_token="</s>" , sep_token="</s>" , cls_token="<s>" , unk_token="<unk>" , pad_token="<pad>" , sp_model_kwargs : Optional[Dict[str, Any]] = None , **kwargs , ):
        '''simple docstring'''
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
        # Compatibility with the original tokenizer
        self.num_madeup_words = 7
        madeup_words = [F'<madeupword{i}>' for i in range(self.num_madeup_words)]
        kwargs["additional_special_tokens"] = kwargs.get('''additional_special_tokens''' , []) or []
        kwargs["additional_special_tokens"] += [
            word for word in madeup_words if word not in kwargs["additional_special_tokens"]
        ]
        super().__init__(
            bos_token=bos_token , eos_token=eos_token , unk_token=unk_token , sep_token=sep_token , cls_token=cls_token , pad_token=pad_token , sp_model_kwargs=self.sp_model_kwargs , **kwargs , )
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(str(vocab_file))
        self.vocab_file = vocab_file
        # Original fairseq vocab and spm vocab must be "aligned":
        # Vocab    |    0    |    1    |   2    |    3    |  4  |  5  |  6  |   7   |   8   |  9
        # -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ----
        # fairseq  | '<s>'   | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's'   | '▁de' | '-'
        # spm      | '<unk>' | '<s>'   | '</s>' | ','     | '.' | '▁' | 's' | '▁de' | '-'   | '▁a'
        # The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab
        self.fairseq_offset = 1
        # Mimic fairseq token-to-id alignment for the first 4 token
        self.fairseq_tokens_to_ids = {'''<s>''': 0, '''<pad>''': 1, '''</s>''': 2, '''<unk>''': 3}
        sp_size = len(self.sp_model)
        madeup_words = {F'<madeupword{i}>': sp_size + i + self.fairseq_offset for i in range(self.num_madeup_words)}
        self.fairseq_tokens_to_ids.update(madeup_words)
        self.fairseq_ids_to_tokens = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
def __getstate__( self : Dict):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ : Any = self.__dict__.copy()
SCREAMING_SNAKE_CASE_ : str = None
SCREAMING_SNAKE_CASE_ : Optional[int] = self.sp_model.serialized_model_proto()
return state
def __setstate__( self : Tuple , lowercase_ : str):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ : int = d
# for backward compatibility
if not hasattr(self , '''sp_model_kwargs'''):
SCREAMING_SNAKE_CASE_ : Union[str, Any] = {}
SCREAMING_SNAKE_CASE_ : Optional[int] = spm.SentencePieceProcessor(**self.sp_model_kwargs)
self.sp_model.LoadFromSerializedProto(self.sp_model_proto)
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] , lowercase_ : List[int] , lowercase_ : Optional[List[int]] = None):
'''simple docstring'''
if token_ids_a is None:
return [self.sep_token_id] + token_ids_a
SCREAMING_SNAKE_CASE_ : Dict = [self.sep_token_id]
return sep + token_ids_a + sep + sep + token_ids_a
def _SCREAMING_SNAKE_CASE ( self : Optional[int] , lowercase_ : List[int] , lowercase_ : Optional[List[int]] = None , lowercase_ : bool = False):
'''simple docstring'''
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=lowercase_ , token_ids_a=lowercase_ , already_has_special_tokens=lowercase_)
if token_ids_a is None:
return [1] + ([0] * len(lowercase_))
return [1] + ([0] * len(lowercase_)) + [1, 1] + ([0] * len(lowercase_))
def _SCREAMING_SNAKE_CASE ( self : Optional[Any] , lowercase_ : List[int] , lowercase_ : Optional[List[int]] = None):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ : Union[str, Any] = [self.sep_token_id]
if token_ids_a is None:
return len(sep + token_ids_a) * [0]
return len(sep + token_ids_a + sep + sep + token_ids_a) * [0]
@property
def _SCREAMING_SNAKE_CASE ( self : Optional[Any]):
'''simple docstring'''
return len(self.sp_model) + self.fairseq_offset + self.num_madeup_words
def _SCREAMING_SNAKE_CASE ( self : Any):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ : List[str] = {self.convert_ids_to_tokens(lowercase_): i for i in range(self.vocab_size)}
vocab.update(self.added_tokens_encoder)
return vocab
def _SCREAMING_SNAKE_CASE ( self : Dict , lowercase_ : str):
'''simple docstring'''
return self.sp_model.encode(lowercase_ , out_type=lowercase_)
def _SCREAMING_SNAKE_CASE ( self : Optional[Any] , lowercase_ : Union[str, Any]):
'''simple docstring'''
if token in self.fairseq_tokens_to_ids:
return self.fairseq_tokens_to_ids[token]
SCREAMING_SNAKE_CASE_ : Optional[Any] = self.sp_model.PieceToId(lowercase_)
# Need to return unknown token if the SP model returned 0
return spm_id + self.fairseq_offset if spm_id else self.unk_token_id
def _SCREAMING_SNAKE_CASE ( self : int , lowercase_ : Optional[Any]):
'''simple docstring'''
if index in self.fairseq_ids_to_tokens:
return self.fairseq_ids_to_tokens[index]
return self.sp_model.IdToPiece(index - self.fairseq_offset)
def _SCREAMING_SNAKE_CASE ( self : int , lowercase_ : Tuple):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ : Optional[int] = ''''''.join(lowercase_).replace(lowercase_ , ''' ''').strip()
return out_string
def _SCREAMING_SNAKE_CASE ( self : str , lowercase_ : str , lowercase_ : Optional[str] = None):
'''simple docstring'''
if not os.path.isdir(lowercase_):
logger.error(F'Vocabulary path ({save_directory}) should be a directory')
return
SCREAMING_SNAKE_CASE_ : List[Any] = os.path.join(
lowercase_ , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''])
if os.path.abspath(self.vocab_file) != os.path.abspath(lowercase_) and os.path.isfile(self.vocab_file):
copyfile(self.vocab_file , lowercase_)
elif not os.path.isfile(self.vocab_file):
with open(lowercase_ , '''wb''') as fi:
SCREAMING_SNAKE_CASE_ : int = self.sp_model.serialized_model_proto()
fi.write(lowercase_)
return (out_vocab_file,)
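# Usage sketch (illustrative ids; assumes the pretrained SentencePiece model is
# available): the fairseq offset of 1 shifts every SentencePiece id so that
# ids 0-3 stay reserved for <s>/<pad>/</s>/<unk>, matching fairseq's ordering:
#
#   tokenizer = XGLMTokenizer.from_pretrained("facebook/xglm-564M")
#   tokenizer.convert_tokens_to_ids("<pad>")  # 1, from the fairseq-aligned map
#   tokenizer.convert_tokens_to_ids(",")      # SentencePiece id 3 + offset = 4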
| 91
| 0
|
'''simple docstring'''
import torch
from torch import nn
from transformers import CLIPPreTrainedModel, CLIPVisionModel
from ...models.attention import BasicTransformerBlock
from ...utils import logging
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name
class PaintByExampleImageEncoder(CLIPPreTrainedModel):
    def __init__(self, config, proj_size=768):
        super().__init__(config)
        self.proj_size = proj_size
        self.model = CLIPVisionModel(config)
        self.mapper = PaintByExampleMapper(config)
        self.final_layer_norm = nn.LayerNorm(config.hidden_size)
        self.proj_out = nn.Linear(config.hidden_size, self.proj_size)
        # uncondition for scaling
        self.uncond_vector = nn.Parameter(torch.randn((1, 1, self.proj_size)))
    def forward(self, pixel_values, return_uncond_vector=False):
        clip_output = self.model(pixel_values=pixel_values)
        latent_states = clip_output.pooler_output
        latent_states = self.mapper(latent_states[:, None])
        latent_states = self.final_layer_norm(latent_states)
        latent_states = self.proj_out(latent_states)
        if return_uncond_vector:
            return latent_states, self.uncond_vector
        return latent_states
class PaintByExampleMapper(nn.Module):
    def __init__(self, config):
        super().__init__()
        num_layers = (config.num_hidden_layers + 1) // 5
        hid_size = config.hidden_size
        num_heads = 1
        self.blocks = nn.ModuleList(
            [
                BasicTransformerBlock(hid_size, num_heads, hid_size // num_heads, activation_fn="gelu", attention_bias=True)
                for _ in range(num_layers)
            ]
        )
    def forward(self, hidden_states):
        for block in self.blocks:
            hidden_states = block(hidden_states)
        return hidden_states
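# Shape walk-through (illustrative, assuming a CLIP ViT-L/14 backbone with
# hidden_size 1024 and proj_size 768): pixel_values (B, 3, 224, 224)
# -> pooler_output (B, 1024) -> mapper over a singleton sequence (B, 1, 1024)
# -> layer norm -> proj_out gives (B, 1, 768), matching the learned
# (1, 1, 768) unconditional vector used for classifier-free guidance.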
| 352
|
'''simple docstring'''
import math
from enum import Enum
from typing import Optional, Union
from torch.optim import Optimizer
from torch.optim.lr_scheduler import LambdaLR
from .utils import logging
logger = logging.get_logger(__name__)
class SchedulerType(Enum):
    LINEAR = "linear"
    COSINE = "cosine"
    COSINE_WITH_RESTARTS = "cosine_with_restarts"
    POLYNOMIAL = "polynomial"
    CONSTANT = "constant"
    CONSTANT_WITH_WARMUP = "constant_with_warmup"
    PIECEWISE_CONSTANT = "piecewise_constant"
def get_constant_schedule(optimizer: Optimizer, last_epoch: int = -1) -> LambdaLR:
    return LambdaLR(optimizer, lambda _: 1, last_epoch=last_epoch)
def get_constant_schedule_with_warmup(optimizer: Optimizer, num_warmup_steps: int, last_epoch: int = -1) -> LambdaLR:
    def lr_lambda(current_step: int):
        if current_step < num_warmup_steps:
            return float(current_step) / float(max(1.0, num_warmup_steps))
        return 1.0
    return LambdaLR(optimizer, lr_lambda, last_epoch=last_epoch)
def get_piecewise_constant_schedule(optimizer: Optimizer, step_rules: str, last_epoch: int = -1) -> LambdaLR:
    rules_dict = {}
    rule_list = step_rules.split(",")
    for rule_str in rule_list[:-1]:
        value_str, steps_str = rule_str.split(":")
        steps = int(steps_str)
        value = float(value_str)
        rules_dict[steps] = value
    last_lr_multiple = float(rule_list[-1])
    def create_rules_function(rules_dict, last_lr_multiple):
        def rule_func(steps: int) -> float:
            sorted_steps = sorted(rules_dict.keys())
            for i, sorted_step in enumerate(sorted_steps):
                if steps < sorted_step:
                    return rules_dict[sorted_steps[i]]
            return last_lr_multiple
        return rule_func
    rules_func = create_rules_function(rules_dict, last_lr_multiple)
    return LambdaLR(optimizer, rules_func, last_epoch=last_epoch)
def get_linear_schedule_with_warmup(optimizer, num_warmup_steps, num_training_steps, last_epoch=-1):
    def lr_lambda(current_step: int):
        if current_step < num_warmup_steps:
            return float(current_step) / float(max(1, num_warmup_steps))
        return max(
            0.0, float(num_training_steps - current_step) / float(max(1, num_training_steps - num_warmup_steps))
        )
    return LambdaLR(optimizer, lr_lambda, last_epoch)
def get_cosine_schedule_with_warmup(
    optimizer: Optimizer, num_warmup_steps: int, num_training_steps: int, num_cycles: float = 0.5, last_epoch: int = -1
):
    def lr_lambda(current_step):
        if current_step < num_warmup_steps:
            return float(current_step) / float(max(1, num_warmup_steps))
        progress = float(current_step - num_warmup_steps) / float(max(1, num_training_steps - num_warmup_steps))
        return max(0.0, 0.5 * (1.0 + math.cos(math.pi * float(num_cycles) * 2.0 * progress)))
    return LambdaLR(optimizer, lr_lambda, last_epoch)
def get_cosine_with_hard_restarts_schedule_with_warmup(
    optimizer: Optimizer, num_warmup_steps: int, num_training_steps: int, num_cycles: int = 1, last_epoch: int = -1
):
    def lr_lambda(current_step):
        if current_step < num_warmup_steps:
            return float(current_step) / float(max(1, num_warmup_steps))
        progress = float(current_step - num_warmup_steps) / float(max(1, num_training_steps - num_warmup_steps))
        if progress >= 1.0:
            return 0.0
        return max(0.0, 0.5 * (1.0 + math.cos(math.pi * ((float(num_cycles) * progress) % 1.0))))
    return LambdaLR(optimizer, lr_lambda, last_epoch)
def get_polynomial_decay_schedule_with_warmup(
    optimizer, num_warmup_steps, num_training_steps, lr_end=1e-7, power=1.0, last_epoch=-1
):
    lr_init = optimizer.defaults["lr"]
    if not (lr_init > lr_end):
        raise ValueError(f"lr_end ({lr_end}) must be smaller than initial lr ({lr_init})")
    def lr_lambda(current_step: int):
        if current_step < num_warmup_steps:
            return float(current_step) / float(max(1, num_warmup_steps))
        elif current_step > num_training_steps:
            return lr_end / lr_init  # as LambdaLR multiplies by lr_init
        else:
            lr_range = lr_init - lr_end
            decay_steps = num_training_steps - num_warmup_steps
            pct_remaining = 1 - (current_step - num_warmup_steps) / decay_steps
            decay = lr_range * pct_remaining**power + lr_end
            return decay / lr_init  # as LambdaLR multiplies by lr_init
    return LambdaLR(optimizer, lr_lambda, last_epoch)
TYPE_TO_SCHEDULER_FUNCTION = {
    SchedulerType.LINEAR: get_linear_schedule_with_warmup,
    SchedulerType.COSINE: get_cosine_schedule_with_warmup,
    SchedulerType.COSINE_WITH_RESTARTS: get_cosine_with_hard_restarts_schedule_with_warmup,
    SchedulerType.POLYNOMIAL: get_polynomial_decay_schedule_with_warmup,
    SchedulerType.CONSTANT: get_constant_schedule,
    SchedulerType.CONSTANT_WITH_WARMUP: get_constant_schedule_with_warmup,
    SchedulerType.PIECEWISE_CONSTANT: get_piecewise_constant_schedule,
}
def get_scheduler(
    name: Union[str, SchedulerType],
    optimizer: Optimizer,
    step_rules: Optional[str] = None,
    num_warmup_steps: Optional[int] = None,
    num_training_steps: Optional[int] = None,
    num_cycles: int = 1,
    power: float = 1.0,
    last_epoch: int = -1,
):
    name = SchedulerType(name)
    schedule_func = TYPE_TO_SCHEDULER_FUNCTION[name]
    if name == SchedulerType.CONSTANT:
        return schedule_func(optimizer, last_epoch=last_epoch)
    if name == SchedulerType.PIECEWISE_CONSTANT:
        return schedule_func(optimizer, step_rules=step_rules, last_epoch=last_epoch)
    # All other schedulers require `num_warmup_steps`
    if num_warmup_steps is None:
        raise ValueError(f"{name} requires `num_warmup_steps`, please provide that argument.")
    if name == SchedulerType.CONSTANT_WITH_WARMUP:
        return schedule_func(optimizer, num_warmup_steps=num_warmup_steps, last_epoch=last_epoch)
    # All other schedulers require `num_training_steps`
    if num_training_steps is None:
        raise ValueError(f"{name} requires `num_training_steps`, please provide that argument.")
    if name == SchedulerType.COSINE_WITH_RESTARTS:
        return schedule_func(
            optimizer,
            num_warmup_steps=num_warmup_steps,
            num_training_steps=num_training_steps,
            num_cycles=num_cycles,
            last_epoch=last_epoch,
        )
    if name == SchedulerType.POLYNOMIAL:
        return schedule_func(
            optimizer,
            num_warmup_steps=num_warmup_steps,
            num_training_steps=num_training_steps,
            power=power,
            last_epoch=last_epoch,
        )
    return schedule_func(
        optimizer, num_warmup_steps=num_warmup_steps, num_training_steps=num_training_steps, last_epoch=last_epoch
    )
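# Usage sketch (illustrative optimizer and step counts): warm up for 500 steps,
# then follow a cosine decay over the remaining training steps.
#
#   optimizer = torch.optim.AdamW(model.parameters(), lr=1e-4)
#   lr_scheduler = get_scheduler(
#       "cosine", optimizer, num_warmup_steps=500, num_training_steps=10_000
#   )
#   for batch in dataloader:
#       ...  # forward/backward
#       optimizer.step()
#       lr_scheduler.step()
#       optimizer.zero_grad()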
| 111
| 0
|
"""simple docstring"""
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import GLPNImageProcessor
class GLPNImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size_divisor=32,
        do_rescale=True,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size_divisor = size_divisor
        self.do_rescale = do_rescale
    def prepare_image_processor_dict(self):
        return {
            "do_resize": self.do_resize,
            "size_divisor": self.size_divisor,
            "do_rescale": self.do_rescale,
        }
@require_torch
@require_vision
class GLPNImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = GLPNImageProcessor if is_vision_available() else None
    def setUp(self):
        self.image_processor_tester = GLPNImageProcessingTester(self)
    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()
    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size_divisor"))
        self.assertTrue(hasattr(image_processing, "resample"))
        self.assertTrue(hasattr(image_processing, "do_rescale"))
    def test_batch_feature(self):
        pass
    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)
        # Test not batched input (GLPNImageProcessor doesn't support batching)
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertTrue(encoded_images.shape[-1] % self.image_processor_tester.size_divisor == 0)
        self.assertTrue(encoded_images.shape[-2] % self.image_processor_tester.size_divisor == 0)
    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)
        # Test not batched input (GLPNImageProcessor doesn't support batching)
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertTrue(encoded_images.shape[-1] % self.image_processor_tester.size_divisor == 0)
        self.assertTrue(encoded_images.shape[-2] % self.image_processor_tester.size_divisor == 0)
    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)
        # Test not batched input (GLPNImageProcessor doesn't support batching)
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertTrue(encoded_images.shape[-1] % self.image_processor_tester.size_divisor == 0)
        self.assertTrue(encoded_images.shape[-2] % self.image_processor_tester.size_divisor == 0)
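# Note: the modulus assertions in the three call tests encode GLPN's resizing
# contract -- each spatial dimension is rounded (down, under the current
# implementation; this is an assumption here) to a multiple of `size_divisor`
# (32 here), so the encoder's strided stages divide evenly.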
| 291
|
"""simple docstring"""
from typing import Optional
from urllib.parse import quote
import huggingface_hub as hfh
from packaging import version
def hf_hub_url(repo_id: str, path: str, revision: Optional[str] = None) -> str:
    if version.parse(hfh.__version__).release < version.parse("0.11.0").release:
        # old versions of hfh don't url-encode the file path
        path = quote(path)
    return hfh.hf_hub_url(repo_id, path, repo_type="dataset", revision=revision)
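# Usage sketch (repo and file names are illustrative):
#
#   hf_hub_url("user/my_dataset", "data/train.csv", revision="main")
#   # -> "https://huggingface.co/datasets/user/my_dataset/resolve/main/data/train.csv"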
| 33
| 0
|
class EditDistance:
    """
    Use : solver = EditDistance()
          edit_distance = solver.min_dist_top_down(first_string, second_string)
    """
    def __init__(self):
        self.word1 = ""
        self.word2 = ""
        self.dp = []
    def __min_dist_top_down_dp(self, m: int, n: int) -> int:
        if m == -1:
            return n + 1
        elif n == -1:
            return m + 1
        elif self.dp[m][n] > -1:
            return self.dp[m][n]
        else:
            if self.word1[m] == self.word2[n]:
                self.dp[m][n] = self.__min_dist_top_down_dp(m - 1, n - 1)
            else:
                insert = self.__min_dist_top_down_dp(m, n - 1)
                delete = self.__min_dist_top_down_dp(m - 1, n)
                replace = self.__min_dist_top_down_dp(m - 1, n - 1)
                self.dp[m][n] = 1 + min(insert, delete, replace)
            return self.dp[m][n]
    def min_dist_top_down(self, word1: str, word2: str) -> int:
        self.word1 = word1
        self.word2 = word2
        self.dp = [[-1 for _ in range(len(word2))] for _ in range(len(word1))]
        return self.__min_dist_top_down_dp(len(word1) - 1, len(word2) - 1)
    def min_dist_bottom_up(self, word1: str, word2: str) -> int:
        self.word1 = word1
        self.word2 = word2
        m = len(word1)
        n = len(word2)
        self.dp = [[0 for _ in range(n + 1)] for _ in range(m + 1)]
        for i in range(m + 1):
            for j in range(n + 1):
                if i == 0:  # first string is empty
                    self.dp[i][j] = j
                elif j == 0:  # second string is empty
                    self.dp[i][j] = i
                elif word1[i - 1] == word2[j - 1]:  # last characters are equal
                    self.dp[i][j] = self.dp[i - 1][j - 1]
                else:
                    insert = self.dp[i][j - 1]
                    delete = self.dp[i - 1][j]
                    replace = self.dp[i - 1][j - 1]
                    self.dp[i][j] = 1 + min(insert, delete, replace)
        return self.dp[m][n]
if __name__ == "__main__":
    solver = EditDistance()
    print("****************** Testing Edit Distance DP Algorithm ******************")
    print()
    S1 = input("Enter the first string: ").strip()
    S2 = input("Enter the second string: ").strip()
    print()
    print(f"The minimum edit distance is: {solver.min_dist_top_down(S1, S2)}")
    print(f"The minimum edit distance is: {solver.min_dist_bottom_up(S1, S2)}")
    print()
    print("*************** End of Testing Edit Distance DP Algorithm ***************")
| 20
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
"configuration_blenderbot_small": [
"BLENDERBOT_SMALL_PRETRAINED_CONFIG_ARCHIVE_MAP",
"BlenderbotSmallConfig",
"BlenderbotSmallOnnxConfig",
],
"tokenization_blenderbot_small": ["BlenderbotSmallTokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase_ = ["BlenderbotSmallTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_blenderbot_small"] = [
"BLENDERBOT_SMALL_PRETRAINED_MODEL_ARCHIVE_LIST",
"BlenderbotSmallForCausalLM",
"BlenderbotSmallForConditionalGeneration",
"BlenderbotSmallModel",
"BlenderbotSmallPreTrainedModel",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_blenderbot_small"] = [
"TFBlenderbotSmallForConditionalGeneration",
"TFBlenderbotSmallModel",
"TFBlenderbotSmallPreTrainedModel",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_blenderbot_small"] = [
"FlaxBlenderbotSmallForConditionalGeneration",
"FlaxBlenderbotSmallModel",
"FlaxBlenderbotSmallPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_blenderbot_small import (
BLENDERBOT_SMALL_PRETRAINED_CONFIG_ARCHIVE_MAP,
BlenderbotSmallConfig,
BlenderbotSmallOnnxConfig,
)
from .tokenization_blenderbot_small import BlenderbotSmallTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_blenderbot_small_fast import BlenderbotSmallTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_blenderbot_small import (
BLENDERBOT_SMALL_PRETRAINED_MODEL_ARCHIVE_LIST,
BlenderbotSmallForCausalLM,
BlenderbotSmallForConditionalGeneration,
BlenderbotSmallModel,
BlenderbotSmallPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_blenderbot_small import (
TFBlenderbotSmallForConditionalGeneration,
TFBlenderbotSmallModel,
TFBlenderbotSmallPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_blenderbot_small import (
FlaxBlenderbotSmallForConditionalGeneration,
FlaxBlenderbotSmallModel,
FlaxBlenderbotSmallPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
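# Lazy-import pattern: at runtime the module object in `sys.modules` is swapped
# for a `_LazyModule` that resolves the names listed in `_import_structure` on
# first attribute access, so the torch/TF/Flax backends are only imported when
# one of their classes is actually requested.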
| 20
| 1
|
"""simple docstring"""
_A = """Alexander Joslin"""
import operator as op
from .stack import Stack
def a__ ( lowerCAmelCase ) -> int:
UpperCAmelCase__ : Optional[Any] = {"""*""": op.mul, """/""": op.truediv, """+""": op.add, """-""": op.sub}
UpperCAmelCase__ : Stack[int] = Stack()
UpperCAmelCase__ : Stack[str] = Stack()
for i in equation:
if i.isdigit():
# RULE 1
operand_stack.push(int(lowerCAmelCase ) )
elif i in operators:
# RULE 2
operator_stack.push(lowerCAmelCase )
elif i == ")":
# RULE 4
UpperCAmelCase__ : List[Any] = operator_stack.peek()
operator_stack.pop()
UpperCAmelCase__ : Union[str, Any] = operand_stack.peek()
operand_stack.pop()
UpperCAmelCase__ : List[str] = operand_stack.peek()
operand_stack.pop()
UpperCAmelCase__ : str = operators[opr](lowerCAmelCase , lowerCAmelCase )
operand_stack.push(lowerCAmelCase )
# RULE 5
return operand_stack.peek()
if __name__ == "__main__":
_A = """(5 + ((4 * 2) * (2 + 3)))"""
# answer = 45
print(f'''{equation} = {dijkstras_two_stack_algorithm(equation)}''')
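# Trace for "(2 + 3)" (illustrative): '(' is ignored, 2 and 3 land on the
# operand stack, '+' on the operator stack; the ')' pops '+', then 3, then 2,
# computes op.add(2, 3) = 5, pushes 5 back, and RULE 5 returns it.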
| 171
|
"""simple docstring"""
import gc
import random
import unittest
import numpy as np
import torch
from transformers import CLIPImageProcessor, CLIPVisionConfig, CLIPVisionModel
from diffusers import HeunDiscreteScheduler, PriorTransformer, ShapEImg2ImgPipeline
from diffusers.pipelines.shap_e import ShapERenderer
from diffusers.utils import floats_tensor, load_image, load_numpy, slow
from diffusers.utils.testing_utils import require_torch_gpu, torch_device
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
class ShapEImg2ImgPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = ShapEImg2ImgPipeline
    params = ["image"]
    batch_params = ["image"]
    required_optional_params = [
        "num_images_per_prompt",
        "num_inference_steps",
        "generator",
        "latents",
        "guidance_scale",
        "frame_size",
        "output_type",
        "return_dict",
    ]
    test_gpu_offload = False
    @property
    def text_embedder_hidden_size(self):
        return 32
    @property
    def time_input_dim(self):
        return 32
    @property
    def time_embed_dim(self):
        return self.time_input_dim * 4
    @property
    def renderer_dim(self):
        return 8
    @property
    def dummy_image_encoder(self):
        torch.manual_seed(0)
        config = CLIPVisionConfig(
            hidden_size=self.text_embedder_hidden_size,
            image_size=64,
            projection_dim=self.text_embedder_hidden_size,
            intermediate_size=37,
            num_attention_heads=4,
            num_channels=3,
            num_hidden_layers=5,
            patch_size=1,
        )
        model = CLIPVisionModel(config)
        return model
    @property
    def dummy_image_processor(self):
        image_processor = CLIPImageProcessor(
            crop_size=224,
            do_center_crop=True,
            do_normalize=True,
            do_resize=True,
            image_mean=[0.48145466, 0.4578275, 0.40821073],
            image_std=[0.26862954, 0.26130258, 0.27577711],
            resample=3,
            size=224,
        )
        return image_processor
    @property
    def dummy_prior(self):
        torch.manual_seed(0)
        model_kwargs = {
            "num_attention_heads": 2,
            "attention_head_dim": 16,
            "embedding_dim": self.time_input_dim,
            "num_embeddings": 32,
            "embedding_proj_dim": self.text_embedder_hidden_size,
            "time_embed_dim": self.time_embed_dim,
            "num_layers": 1,
            "clip_embed_dim": self.time_input_dim * 2,
            "additional_embeddings": 0,
            "time_embed_act_fn": "gelu",
            "norm_in_type": "layer",
            "embedding_proj_norm_type": "layer",
            "encoder_hid_proj_type": None,
            "added_emb_type": None,
        }
        model = PriorTransformer(**model_kwargs)
        return model
    @property
    def dummy_renderer(self):
        torch.manual_seed(0)
        model_kwargs = {
            "param_shapes": (
                (self.renderer_dim, 93),
                (self.renderer_dim, 8),
                (self.renderer_dim, 8),
                (self.renderer_dim, 8),
            ),
            "d_latent": self.time_input_dim,
            "d_hidden": self.renderer_dim,
            "n_output": 12,
            "background": (
                0.1,
                0.1,
                0.1,
            ),
        }
        model = ShapERenderer(**model_kwargs)
        return model
    def get_dummy_components(self):
        prior = self.dummy_prior
        image_encoder = self.dummy_image_encoder
        image_processor = self.dummy_image_processor
        renderer = self.dummy_renderer
        scheduler = HeunDiscreteScheduler(
            beta_schedule="exp",
            num_train_timesteps=1024,
            prediction_type="sample",
            use_karras_sigmas=True,
            clip_sample=True,
            clip_sample_range=1.0,
        )
        components = {
            "prior": prior,
            "image_encoder": image_encoder,
            "image_processor": image_processor,
            "renderer": renderer,
            "scheduler": scheduler,
        }
        return components
    def get_dummy_inputs(self, device, seed=0):
        input_image = floats_tensor((1, 3, 64, 64), rng=random.Random(seed)).to(device)
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "image": input_image,
            "generator": generator,
            "num_inference_steps": 1,
            "frame_size": 32,
            "output_type": "np",
        }
        return inputs
    def test_shap_e_img2img(self):
        device = "cpu"
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)
        output = pipe(**self.get_dummy_inputs(device))
        image = output.images[0]
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (20, 32, 32, 3)
        expected_slice = np.array(
            [
                0.00039216,
                0.00039216,
                0.00039216,
                0.00039216,
                0.00039216,
                0.00039216,
                0.00039216,
                0.00039216,
                0.00039216,
            ]
        )
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
    def test_inference_batch_consistent(self):
        self._test_inference_batch_consistent(batch_sizes=[1, 2])
    def test_inference_batch_single_identical(self):
        test_max_difference = torch_device == "cpu"
        relax_max_difference = True
        self._test_inference_batch_single_identical(
            batch_size=2,
            test_max_difference=test_max_difference,
            relax_max_difference=relax_max_difference,
        )
    def test_num_images_per_prompt(self):
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        batch_size = 1
        num_images_per_prompt = 2
        inputs = self.get_dummy_inputs(torch_device)
        for key in inputs.keys():
            if key in self.batch_params:
                inputs[key] = batch_size * [inputs[key]]
        images = pipe(**inputs, num_images_per_prompt=num_images_per_prompt)[0]
        assert images.shape[0] == batch_size * num_images_per_prompt
@slow
@require_torch_gpu
class ShapEImg2ImgPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    def test_shap_e_img2img(self):
        input_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/shap_e/corgi.png"
        )
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/shap_e/test_shap_e_img2img_out.npy"
        )
        pipe = ShapEImg2ImgPipeline.from_pretrained("openai/shap-e-img2img")
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        generator = torch.Generator(device=torch_device).manual_seed(0)
        images = pipe(
            input_image,
            generator=generator,
            guidance_scale=3.0,
            num_inference_steps=64,
            frame_size=64,
            output_type="np",
        ).images[0]
        assert images.shape == (20, 64, 64, 3)
        assert_mean_pixel_difference(images, expected_image)
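# Note on the shape assertions: a single Shap-E "image" is a stack of rendered
# views, returned as (num_frames, frame_size, frame_size, 3) -- hence
# (20, 32, 32, 3) in the fast test and (20, 64, 64, 3) in the slow test.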
| 171
| 1
|
import re
import jax.numpy as jnp
from flax.traverse_util import flatten_dict, unflatten_dict
from jax.random import PRNGKey
from ..utils import logging
logger = logging.get_logger(__name__)
def rename_key(key):
    regex = r"\w+[.]\d+"
    pats = re.findall(regex, key)
    for pat in pats:
        key = key.replace(pat, "_".join(pat.split(".")))
    return key
def rename_key_and_reshape_tensor(pt_tuple_key, pt_tensor, random_flax_state_dict):
    # conv norm or layer norm
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("scale",)
    if (
        any("norm" in str_ for str_ in pt_tuple_key)
        and (pt_tuple_key[-1] == "bias")
        and (pt_tuple_key[:-1] + ("bias",) not in random_flax_state_dict)
        and (pt_tuple_key[:-1] + ("scale",) in random_flax_state_dict)
    ):
        renamed_pt_tuple_key = pt_tuple_key[:-1] + ("scale",)
        return renamed_pt_tuple_key, pt_tensor
    elif pt_tuple_key[-1] in ["weight", "gamma"] and pt_tuple_key[:-1] + ("scale",) in random_flax_state_dict:
        renamed_pt_tuple_key = pt_tuple_key[:-1] + ("scale",)
        return renamed_pt_tuple_key, pt_tensor
    # embedding
    if pt_tuple_key[-1] == "weight" and pt_tuple_key[:-1] + ("embedding",) in random_flax_state_dict:
        renamed_pt_tuple_key = pt_tuple_key[:-1] + ("embedding",)
        return renamed_pt_tuple_key, pt_tensor
    # conv layer
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("kernel",)
    if pt_tuple_key[-1] == "weight" and pt_tensor.ndim == 4:
        pt_tensor = pt_tensor.transpose(2, 3, 1, 0)
        return renamed_pt_tuple_key, pt_tensor
    # linear layer
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("kernel",)
    if pt_tuple_key[-1] == "weight":
        pt_tensor = pt_tensor.T
        return renamed_pt_tuple_key, pt_tensor
    # old PyTorch layer norm weight
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("weight",)
    if pt_tuple_key[-1] == "gamma":
        return renamed_pt_tuple_key, pt_tensor
    # old PyTorch layer norm bias
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("bias",)
    if pt_tuple_key[-1] == "beta":
        return renamed_pt_tuple_key, pt_tensor
    return pt_tuple_key, pt_tensor
def convert_pytorch_state_dict_to_flax(pt_state_dict, flax_model, init_key=42):
    # Step 1: Convert pytorch tensors to numpy
    pt_state_dict = {k: v.numpy() for k, v in pt_state_dict.items()}
    # Step 2: Since the model is stateless, get random Flax params
    random_flax_params = flax_model.init_weights(PRNGKey(init_key))
    random_flax_state_dict = flatten_dict(random_flax_params)
    flax_state_dict = {}
    # Need to change some parameters name to match Flax names
    for pt_key, pt_tensor in pt_state_dict.items():
        renamed_pt_key = rename_key(pt_key)
        pt_tuple_key = tuple(renamed_pt_key.split("."))
        # Correctly rename weight parameters
        flax_key, flax_tensor = rename_key_and_reshape_tensor(pt_tuple_key, pt_tensor, random_flax_state_dict)
        if flax_key in random_flax_state_dict:
            if flax_tensor.shape != random_flax_state_dict[flax_key].shape:
                raise ValueError(
                    f"PyTorch checkpoint seems to be incorrect. Weight {pt_key} was expected to be of shape "
                    f"{random_flax_state_dict[flax_key].shape}, but is {flax_tensor.shape}."
                )
        # also add unexpected weight so that warning is thrown
        flax_state_dict[flax_key] = jnp.asarray(flax_tensor)
    return unflatten_dict(flax_state_dict)
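# Illustrative key mapping (module names are hypothetical): a torch nn.Linear
# stored under "dense.weight" with shape (out_features, in_features) becomes
# the Flax entry ("dense", "kernel") holding the transposed
# (in_features, out_features) array, which is the layout flax.linen.Dense expects.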
| 369
|
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ..models.auto import AutoModelForSeq2SeqLM, AutoTokenizer
from .base import PipelineTool
LANGUAGE_CODES = {
'Acehnese Arabic': 'ace_Arab',
'Acehnese Latin': 'ace_Latn',
'Mesopotamian Arabic': 'acm_Arab',
'Ta\'izzi-Adeni Arabic': 'acq_Arab',
'Tunisian Arabic': 'aeb_Arab',
'Afrikaans': 'afr_Latn',
'South Levantine Arabic': 'ajp_Arab',
'Akan': 'aka_Latn',
'Amharic': 'amh_Ethi',
'North Levantine Arabic': 'apc_Arab',
'Modern Standard Arabic': 'arb_Arab',
'Modern Standard Arabic Romanized': 'arb_Latn',
'Najdi Arabic': 'ars_Arab',
'Moroccan Arabic': 'ary_Arab',
'Egyptian Arabic': 'arz_Arab',
'Assamese': 'asm_Beng',
'Asturian': 'ast_Latn',
'Awadhi': 'awa_Deva',
'Central Aymara': 'ayr_Latn',
'South Azerbaijani': 'azb_Arab',
'North Azerbaijani': 'azj_Latn',
'Bashkir': 'bak_Cyrl',
'Bambara': 'bam_Latn',
'Balinese': 'ban_Latn',
'Belarusian': 'bel_Cyrl',
'Bemba': 'bem_Latn',
'Bengali': 'ben_Beng',
'Bhojpuri': 'bho_Deva',
'Banjar Arabic': 'bjn_Arab',
'Banjar Latin': 'bjn_Latn',
'Standard Tibetan': 'bod_Tibt',
'Bosnian': 'bos_Latn',
'Buginese': 'bug_Latn',
'Bulgarian': 'bul_Cyrl',
'Catalan': 'cat_Latn',
'Cebuano': 'ceb_Latn',
'Czech': 'ces_Latn',
'Chokwe': 'cjk_Latn',
'Central Kurdish': 'ckb_Arab',
'Crimean Tatar': 'crh_Latn',
'Welsh': 'cym_Latn',
'Danish': 'dan_Latn',
'German': 'deu_Latn',
'Southwestern Dinka': 'dik_Latn',
'Dyula': 'dyu_Latn',
'Dzongkha': 'dzo_Tibt',
'Greek': 'ell_Grek',
'English': 'eng_Latn',
'Esperanto': 'epo_Latn',
'Estonian': 'est_Latn',
'Basque': 'eus_Latn',
'Ewe': 'ewe_Latn',
'Faroese': 'fao_Latn',
'Fijian': 'fij_Latn',
'Finnish': 'fin_Latn',
'Fon': 'fon_Latn',
'French': 'fra_Latn',
'Friulian': 'fur_Latn',
'Nigerian Fulfulde': 'fuv_Latn',
'Scottish Gaelic': 'gla_Latn',
'Irish': 'gle_Latn',
'Galician': 'glg_Latn',
'Guarani': 'grn_Latn',
'Gujarati': 'guj_Gujr',
'Haitian Creole': 'hat_Latn',
'Hausa': 'hau_Latn',
'Hebrew': 'heb_Hebr',
'Hindi': 'hin_Deva',
'Chhattisgarhi': 'hne_Deva',
'Croatian': 'hrv_Latn',
'Hungarian': 'hun_Latn',
'Armenian': 'hye_Armn',
'Igbo': 'ibo_Latn',
'Ilocano': 'ilo_Latn',
'Indonesian': 'ind_Latn',
'Icelandic': 'isl_Latn',
'Italian': 'ita_Latn',
'Javanese': 'jav_Latn',
'Japanese': 'jpn_Jpan',
'Kabyle': 'kab_Latn',
'Jingpho': 'kac_Latn',
'Kamba': 'kam_Latn',
'Kannada': 'kan_Knda',
'Kashmiri Arabic': 'kas_Arab',
'Kashmiri Devanagari': 'kas_Deva',
'Georgian': 'kat_Geor',
'Central Kanuri Arabic': 'knc_Arab',
'Central Kanuri Latin': 'knc_Latn',
'Kazakh': 'kaz_Cyrl',
'Kabiyè': 'kbp_Latn',
'Kabuverdianu': 'kea_Latn',
'Khmer': 'khm_Khmr',
'Kikuyu': 'kik_Latn',
'Kinyarwanda': 'kin_Latn',
'Kyrgyz': 'kir_Cyrl',
'Kimbundu': 'kmb_Latn',
'Northern Kurdish': 'kmr_Latn',
'Kikongo': 'kon_Latn',
'Korean': 'kor_Hang',
'Lao': 'lao_Laoo',
'Ligurian': 'lij_Latn',
'Limburgish': 'lim_Latn',
'Lingala': 'lin_Latn',
'Lithuanian': 'lit_Latn',
'Lombard': 'lmo_Latn',
'Latgalian': 'ltg_Latn',
'Luxembourgish': 'ltz_Latn',
'Luba-Kasai': 'lua_Latn',
'Ganda': 'lug_Latn',
'Luo': 'luo_Latn',
'Mizo': 'lus_Latn',
'Standard Latvian': 'lvs_Latn',
'Magahi': 'mag_Deva',
'Maithili': 'mai_Deva',
'Malayalam': 'mal_Mlym',
'Marathi': 'mar_Deva',
'Minangkabau Arabic ': 'min_Arab',
'Minangkabau Latin': 'min_Latn',
'Macedonian': 'mkd_Cyrl',
'Plateau Malagasy': 'plt_Latn',
'Maltese': 'mlt_Latn',
'Meitei Bengali': 'mni_Beng',
'Halh Mongolian': 'khk_Cyrl',
'Mossi': 'mos_Latn',
'Maori': 'mri_Latn',
'Burmese': 'mya_Mymr',
'Dutch': 'nld_Latn',
'Norwegian Nynorsk': 'nno_Latn',
'Norwegian Bokmål': 'nob_Latn',
'Nepali': 'npi_Deva',
'Northern Sotho': 'nso_Latn',
'Nuer': 'nus_Latn',
'Nyanja': 'nya_Latn',
'Occitan': 'oci_Latn',
'West Central Oromo': 'gaz_Latn',
'Odia': 'ory_Orya',
'Pangasinan': 'pag_Latn',
'Eastern Panjabi': 'pan_Guru',
'Papiamento': 'pap_Latn',
'Western Persian': 'pes_Arab',
'Polish': 'pol_Latn',
'Portuguese': 'por_Latn',
'Dari': 'prs_Arab',
'Southern Pashto': 'pbt_Arab',
'Ayacucho Quechua': 'quy_Latn',
'Romanian': 'ron_Latn',
'Rundi': 'run_Latn',
'Russian': 'rus_Cyrl',
'Sango': 'sag_Latn',
'Sanskrit': 'san_Deva',
'Santali': 'sat_Olck',
'Sicilian': 'scn_Latn',
'Shan': 'shn_Mymr',
'Sinhala': 'sin_Sinh',
'Slovak': 'slk_Latn',
'Slovenian': 'slv_Latn',
'Samoan': 'smo_Latn',
'Shona': 'sna_Latn',
'Sindhi': 'snd_Arab',
'Somali': 'som_Latn',
'Southern Sotho': 'sot_Latn',
'Spanish': 'spa_Latn',
'Tosk Albanian': 'als_Latn',
'Sardinian': 'srd_Latn',
'Serbian': 'srp_Cyrl',
'Swati': 'ssw_Latn',
'Sundanese': 'sun_Latn',
'Swedish': 'swe_Latn',
'Swahili': 'swh_Latn',
'Silesian': 'szl_Latn',
'Tamil': 'tam_Taml',
'Tatar': 'tat_Cyrl',
'Telugu': 'tel_Telu',
'Tajik': 'tgk_Cyrl',
'Tagalog': 'tgl_Latn',
'Thai': 'tha_Thai',
'Tigrinya': 'tir_Ethi',
'Tamasheq Latin': 'taq_Latn',
'Tamasheq Tifinagh': 'taq_Tfng',
'Tok Pisin': 'tpi_Latn',
'Tswana': 'tsn_Latn',
'Tsonga': 'tso_Latn',
'Turkmen': 'tuk_Latn',
'Tumbuka': 'tum_Latn',
'Turkish': 'tur_Latn',
'Twi': 'twi_Latn',
'Central Atlas Tamazight': 'tzm_Tfng',
'Uyghur': 'uig_Arab',
'Ukrainian': 'ukr_Cyrl',
'Umbundu': 'umb_Latn',
'Urdu': 'urd_Arab',
'Northern Uzbek': 'uzn_Latn',
'Venetian': 'vec_Latn',
'Vietnamese': 'vie_Latn',
'Waray': 'war_Latn',
'Wolof': 'wol_Latn',
'Xhosa': 'xho_Latn',
'Eastern Yiddish': 'ydd_Hebr',
'Yoruba': 'yor_Latn',
'Yue Chinese': 'yue_Hant',
'Chinese Simplified': 'zho_Hans',
'Chinese Traditional': 'zho_Hant',
'Standard Malay': 'zsm_Latn',
'Zulu': 'zul_Latn',
}
class TranslationTool(PipelineTool):
    default_checkpoint = "facebook/nllb-200-distilled-600M"
    description = (
        "This is a tool that translates text from a language to another. It takes three inputs: `text`, which should "
        "be the text to translate, `src_lang`, which should be the language of the text to translate and `tgt_lang`, "
        "which should be the language for the desired output language. Both `src_lang` and `tgt_lang` are written in "
        "plain English, such as 'Romanian', or 'Albanian'. It returns the text translated in `tgt_lang`."
    )
    name = "translator"
    pre_processor_class = AutoTokenizer
    model_class = AutoModelForSeq2SeqLM
    lang_to_code = LANGUAGE_CODES
    inputs = ["text", "text", "text"]
    outputs = ["text"]
    def encode(self, text, src_lang, tgt_lang):
        if src_lang not in self.lang_to_code:
            raise ValueError(f"{src_lang} is not a supported language.")
        if tgt_lang not in self.lang_to_code:
            raise ValueError(f"{tgt_lang} is not a supported language.")
        src_lang = self.lang_to_code[src_lang]
        tgt_lang = self.lang_to_code[tgt_lang]
        return self.pre_processor._build_translation_inputs(
            text, return_tensors="pt", src_lang=src_lang, tgt_lang=tgt_lang
        )
    def forward(self, inputs):
        return self.model.generate(**inputs)
    def decode(self, outputs):
        return self.post_processor.decode(outputs[0].tolist(), skip_special_tokens=True)
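# Usage sketch (illustrative; transformers tools are callable once instantiated):
#
#   translator = TranslationTool()
#   translator("Bonjour, comment allez-vous ?", src_lang="French", tgt_lang="English")
#   # -> e.g. "Hello, how are you?" (exact wording depends on the model)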
| 189
| 0
|
"""simple docstring"""
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ..models.auto import AutoModelForSeq2SeqLM, AutoTokenizer
from .base import PipelineTool
LANGUAGE_CODES = {
"Acehnese Arabic": "ace_Arab",
"Acehnese Latin": "ace_Latn",
"Mesopotamian Arabic": "acm_Arab",
"Ta'izzi-Adeni Arabic": "acq_Arab",
"Tunisian Arabic": "aeb_Arab",
"Afrikaans": "afr_Latn",
"South Levantine Arabic": "ajp_Arab",
"Akan": "aka_Latn",
"Amharic": "amh_Ethi",
"North Levantine Arabic": "apc_Arab",
"Modern Standard Arabic": "arb_Arab",
"Modern Standard Arabic Romanized": "arb_Latn",
"Najdi Arabic": "ars_Arab",
"Moroccan Arabic": "ary_Arab",
"Egyptian Arabic": "arz_Arab",
"Assamese": "asm_Beng",
"Asturian": "ast_Latn",
"Awadhi": "awa_Deva",
"Central Aymara": "ayr_Latn",
"South Azerbaijani": "azb_Arab",
"North Azerbaijani": "azj_Latn",
"Bashkir": "bak_Cyrl",
"Bambara": "bam_Latn",
"Balinese": "ban_Latn",
"Belarusian": "bel_Cyrl",
"Bemba": "bem_Latn",
"Bengali": "ben_Beng",
"Bhojpuri": "bho_Deva",
"Banjar Arabic": "bjn_Arab",
"Banjar Latin": "bjn_Latn",
"Standard Tibetan": "bod_Tibt",
"Bosnian": "bos_Latn",
"Buginese": "bug_Latn",
"Bulgarian": "bul_Cyrl",
"Catalan": "cat_Latn",
"Cebuano": "ceb_Latn",
"Czech": "ces_Latn",
"Chokwe": "cjk_Latn",
"Central Kurdish": "ckb_Arab",
"Crimean Tatar": "crh_Latn",
"Welsh": "cym_Latn",
"Danish": "dan_Latn",
"German": "deu_Latn",
"Southwestern Dinka": "dik_Latn",
"Dyula": "dyu_Latn",
"Dzongkha": "dzo_Tibt",
"Greek": "ell_Grek",
"English": "eng_Latn",
"Esperanto": "epo_Latn",
"Estonian": "est_Latn",
"Basque": "eus_Latn",
"Ewe": "ewe_Latn",
"Faroese": "fao_Latn",
"Fijian": "fij_Latn",
"Finnish": "fin_Latn",
"Fon": "fon_Latn",
"French": "fra_Latn",
"Friulian": "fur_Latn",
"Nigerian Fulfulde": "fuv_Latn",
"Scottish Gaelic": "gla_Latn",
"Irish": "gle_Latn",
"Galician": "glg_Latn",
"Guarani": "grn_Latn",
"Gujarati": "guj_Gujr",
"Haitian Creole": "hat_Latn",
"Hausa": "hau_Latn",
"Hebrew": "heb_Hebr",
"Hindi": "hin_Deva",
"Chhattisgarhi": "hne_Deva",
"Croatian": "hrv_Latn",
"Hungarian": "hun_Latn",
"Armenian": "hye_Armn",
"Igbo": "ibo_Latn",
"Ilocano": "ilo_Latn",
"Indonesian": "ind_Latn",
"Icelandic": "isl_Latn",
"Italian": "ita_Latn",
"Javanese": "jav_Latn",
"Japanese": "jpn_Jpan",
"Kabyle": "kab_Latn",
"Jingpho": "kac_Latn",
"Kamba": "kam_Latn",
"Kannada": "kan_Knda",
"Kashmiri Arabic": "kas_Arab",
"Kashmiri Devanagari": "kas_Deva",
"Georgian": "kat_Geor",
"Central Kanuri Arabic": "knc_Arab",
"Central Kanuri Latin": "knc_Latn",
"Kazakh": "kaz_Cyrl",
"Kabiyè": "kbp_Latn",
"Kabuverdianu": "kea_Latn",
"Khmer": "khm_Khmr",
"Kikuyu": "kik_Latn",
"Kinyarwanda": "kin_Latn",
"Kyrgyz": "kir_Cyrl",
"Kimbundu": "kmb_Latn",
"Northern Kurdish": "kmr_Latn",
"Kikongo": "kon_Latn",
"Korean": "kor_Hang",
"Lao": "lao_Laoo",
"Ligurian": "lij_Latn",
"Limburgish": "lim_Latn",
"Lingala": "lin_Latn",
"Lithuanian": "lit_Latn",
"Lombard": "lmo_Latn",
"Latgalian": "ltg_Latn",
"Luxembourgish": "ltz_Latn",
"Luba-Kasai": "lua_Latn",
"Ganda": "lug_Latn",
"Luo": "luo_Latn",
"Mizo": "lus_Latn",
"Standard Latvian": "lvs_Latn",
"Magahi": "mag_Deva",
"Maithili": "mai_Deva",
"Malayalam": "mal_Mlym",
"Marathi": "mar_Deva",
"Minangkabau Arabic ": "min_Arab",
"Minangkabau Latin": "min_Latn",
"Macedonian": "mkd_Cyrl",
"Plateau Malagasy": "plt_Latn",
"Maltese": "mlt_Latn",
"Meitei Bengali": "mni_Beng",
"Halh Mongolian": "khk_Cyrl",
"Mossi": "mos_Latn",
"Maori": "mri_Latn",
"Burmese": "mya_Mymr",
"Dutch": "nld_Latn",
"Norwegian Nynorsk": "nno_Latn",
"Norwegian Bokmål": "nob_Latn",
"Nepali": "npi_Deva",
"Northern Sotho": "nso_Latn",
"Nuer": "nus_Latn",
"Nyanja": "nya_Latn",
"Occitan": "oci_Latn",
"West Central Oromo": "gaz_Latn",
"Odia": "ory_Orya",
"Pangasinan": "pag_Latn",
"Eastern Panjabi": "pan_Guru",
"Papiamento": "pap_Latn",
"Western Persian": "pes_Arab",
"Polish": "pol_Latn",
"Portuguese": "por_Latn",
"Dari": "prs_Arab",
"Southern Pashto": "pbt_Arab",
"Ayacucho Quechua": "quy_Latn",
"Romanian": "ron_Latn",
"Rundi": "run_Latn",
"Russian": "rus_Cyrl",
"Sango": "sag_Latn",
"Sanskrit": "san_Deva",
"Santali": "sat_Olck",
"Sicilian": "scn_Latn",
"Shan": "shn_Mymr",
"Sinhala": "sin_Sinh",
"Slovak": "slk_Latn",
"Slovenian": "slv_Latn",
"Samoan": "smo_Latn",
"Shona": "sna_Latn",
"Sindhi": "snd_Arab",
"Somali": "som_Latn",
"Southern Sotho": "sot_Latn",
"Spanish": "spa_Latn",
"Tosk Albanian": "als_Latn",
"Sardinian": "srd_Latn",
"Serbian": "srp_Cyrl",
"Swati": "ssw_Latn",
"Sundanese": "sun_Latn",
"Swedish": "swe_Latn",
"Swahili": "swh_Latn",
"Silesian": "szl_Latn",
"Tamil": "tam_Taml",
"Tatar": "tat_Cyrl",
"Telugu": "tel_Telu",
"Tajik": "tgk_Cyrl",
"Tagalog": "tgl_Latn",
"Thai": "tha_Thai",
"Tigrinya": "tir_Ethi",
"Tamasheq Latin": "taq_Latn",
"Tamasheq Tifinagh": "taq_Tfng",
"Tok Pisin": "tpi_Latn",
"Tswana": "tsn_Latn",
"Tsonga": "tso_Latn",
"Turkmen": "tuk_Latn",
"Tumbuka": "tum_Latn",
"Turkish": "tur_Latn",
"Twi": "twi_Latn",
"Central Atlas Tamazight": "tzm_Tfng",
"Uyghur": "uig_Arab",
"Ukrainian": "ukr_Cyrl",
"Umbundu": "umb_Latn",
"Urdu": "urd_Arab",
"Northern Uzbek": "uzn_Latn",
"Venetian": "vec_Latn",
"Vietnamese": "vie_Latn",
"Waray": "war_Latn",
"Wolof": "wol_Latn",
"Xhosa": "xho_Latn",
"Eastern Yiddish": "ydd_Hebr",
"Yoruba": "yor_Latn",
"Yue Chinese": "yue_Hant",
"Chinese Simplified": "zho_Hans",
"Chinese Traditional": "zho_Hant",
"Standard Malay": "zsm_Latn",
"Zulu": "zul_Latn",
}
class TranslationTool(PipelineTool):
    default_checkpoint = "facebook/nllb-200-distilled-600M"
    description = (
        "This is a tool that translates text from a language to another. It takes three inputs: `text`, which should "
        "be the text to translate, `src_lang`, which should be the language of the text to translate and `tgt_lang`, "
        "which should be the language for the desired output language. Both `src_lang` and `tgt_lang` are written in "
        "plain English, such as 'Romanian', or 'Albanian'. It returns the text translated in `tgt_lang`."
    )
    name = "translator"
    pre_processor_class = AutoTokenizer
    model_class = AutoModelForSeq2SeqLM
    lang_to_code = LANGUAGE_CODES
    inputs = ["text", "text", "text"]
    outputs = ["text"]
    def encode(self, text, src_lang, tgt_lang):
        if src_lang not in self.lang_to_code:
            raise ValueError(f"{src_lang} is not a supported language.")
        if tgt_lang not in self.lang_to_code:
            raise ValueError(f"{tgt_lang} is not a supported language.")
        src_lang = self.lang_to_code[src_lang]
        tgt_lang = self.lang_to_code[tgt_lang]
        return self.pre_processor._build_translation_inputs(
            text, return_tensors="pt", src_lang=src_lang, tgt_lang=tgt_lang
        )
    def forward(self, inputs):
        return self.model.generate(**inputs)
    def decode(self, outputs):
        return self.post_processor.decode(outputs[0].tolist(), skip_special_tokens=True)
| 217
|
"""simple docstring"""
import argparse
import torch
from transformers import GPT2Config, GPT2Model, load_tf_weights_in_gpt2
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging
logging.set_verbosity_info()
def convert_gpt2_checkpoint_to_pytorch(gpt2_checkpoint_path, gpt2_config_file, pytorch_dump_folder_path):
    # Construct model
    if gpt2_config_file == "":
        config = GPT2Config()
    else:
        config = GPT2Config.from_json_file(gpt2_config_file)
    model = GPT2Model(config)
    # Load weights from numpy
    load_tf_weights_in_gpt2(model, config, gpt2_checkpoint_path)
    # Save pytorch-model
    pytorch_weights_dump_path = pytorch_dump_folder_path + "/" + WEIGHTS_NAME
    pytorch_config_dump_path = pytorch_dump_folder_path + "/" + CONFIG_NAME
    print(f"Save PyTorch model to {pytorch_weights_dump_path}")
    torch.save(model.state_dict(), pytorch_weights_dump_path)
    print(f"Save configuration file to {pytorch_config_dump_path}")
    with open(pytorch_config_dump_path, "w", encoding="utf-8") as f:
        f.write(config.to_json_string())
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--gpt2_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    parser.add_argument(
        "--gpt2_config_file",
        default="",
        type=str,
        help=(
            "An optional config json file corresponding to the pre-trained OpenAI model. \n"
            "This specifies the model architecture."
        ),
    )
    args = parser.parse_args()
    convert_gpt2_checkpoint_to_pytorch(args.gpt2_checkpoint_path, args.gpt2_config_file, args.pytorch_dump_folder_path)
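# Example invocation (all paths are illustrative):
#
#   python convert_gpt2_checkpoint_to_pytorch.py \
#       --gpt2_checkpoint_path ./tf_ckpt/model.ckpt \
#       --pytorch_dump_folder_path ./gpt2-pytorch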
| 217
| 1
|
def is_pentagonal(n: int) -> bool:
    """A number is pentagonal if (1 + sqrt(1 + 24 * n)) / 6 is an integer."""
    root = (1 + 24 * n) ** 0.5
    return ((1 + root) / 6) % 1 == 0
def solution(limit: int = 5000) -> int:
    """Find a pair of pentagonal numbers whose sum and difference are both
    pentagonal, and return that difference."""
    pentagonal_nums = [(i * (3 * i - 1)) // 2 for i in range(1, limit)]
    for i, pentagonal_i in enumerate(pentagonal_nums):
        for j in range(i, len(pentagonal_nums)):
            pentagonal_j = pentagonal_nums[j]
            a = pentagonal_i + pentagonal_j
            b = pentagonal_j - pentagonal_i
            if is_pentagonal(a) and is_pentagonal(b):
                return b
    return -1
if __name__ == "__main__":
    print(f"{solution() = }")
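# Quick check of the pentagonal-number test (P_4 = 4 * (3*4 - 1) / 2 = 22):
#
#   assert is_pentagonal(22)       # sqrt(1 + 24*22) = 23, and (1 + 23) / 6 = 4
#   assert not is_pentagonal(23)   # sqrt(553) is irrational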
| 366
|
import json
import os
import tempfile
from unittest.mock import patch
import torch
from torch.utils.data import DataLoader, TensorDataset
from accelerate import DistributedType, infer_auto_device_map, init_empty_weights
from accelerate.accelerator import Accelerator
from accelerate.state import GradientState, PartialState
from accelerate.test_utils import require_bnb, require_multi_gpu, slow
from accelerate.test_utils.testing import AccelerateTestCase, require_cuda
from accelerate.utils import patch_environment
def __a ( ) -> str:
'''simple docstring'''
UpperCAmelCase_= torch.nn.Linear(2 ,4 )
UpperCAmelCase_= torch.optim.AdamW(model.parameters() ,lr=1.0 )
UpperCAmelCase_= torch.optim.lr_scheduler.OneCycleLR(lowerCAmelCase_ ,max_lr=0.01 ,steps_per_epoch=2 ,epochs=1 )
UpperCAmelCase_= DataLoader(TensorDataset(torch.tensor([1, 2, 3] ) ) )
UpperCAmelCase_= DataLoader(TensorDataset(torch.tensor([4, 5, 6] ) ) )
return model, optimizer, scheduler, train_dl, valid_dl
def __a ( lowerCAmelCase_ : Any ) -> Union[str, Any]:
'''simple docstring'''
return (model.weight.abs().sum() + model.bias.abs().sum()).item()
def __a ( lowerCAmelCase_ : Tuple ) -> Tuple:
'''simple docstring'''
UpperCAmelCase_= torch.nn.Linear(*tuple(model.weight.T.shape ) ).state_dict()
model.load_state_dict(lowerCAmelCase_ )
class lowercase ( snake_case__):
"""simple docstring"""
@require_cuda
def _SCREAMING_SNAKE_CASE ( self : List[str] ) -> Optional[Any]:
UpperCAmelCase_= Accelerator()
assert PartialState._shared_state["_cpu"] is False
assert PartialState._shared_state["device"].type == "cuda"
with self.assertRaises(__UpperCAmelCase ):
UpperCAmelCase_= Accelerator(cpu=__UpperCAmelCase )
def _SCREAMING_SNAKE_CASE ( self : Tuple ) -> Union[str, Any]:
UpperCAmelCase_= Accelerator()
UpperCAmelCase_= GradientState()
assert state.num_steps == 1
UpperCAmelCase_= 4
assert state.num_steps == 4
assert state.sync_gradients is True
UpperCAmelCase_= False
assert state.sync_gradients is False
GradientState._reset_state()
def _SCREAMING_SNAKE_CASE ( self : Dict ) -> List[str]:
UpperCAmelCase_= Accelerator()
UpperCAmelCase_, UpperCAmelCase_, UpperCAmelCase_, UpperCAmelCase_, UpperCAmelCase_= create_components()
(
(
UpperCAmelCase_
), (
UpperCAmelCase_
), (
UpperCAmelCase_
), (
UpperCAmelCase_
), (
UpperCAmelCase_
),
)= accelerator.prepare(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase )
self.assertTrue(prepared_model in accelerator._models )
self.assertTrue(prepared_optimizer in accelerator._optimizers )
self.assertTrue(prepared_scheduler in accelerator._schedulers )
self.assertTrue(prepared_train_dl in accelerator._dataloaders )
self.assertTrue(prepared_valid_dl in accelerator._dataloaders )
def _SCREAMING_SNAKE_CASE ( self : int ) -> Dict:
UpperCAmelCase_= Accelerator()
UpperCAmelCase_, UpperCAmelCase_, UpperCAmelCase_, UpperCAmelCase_, UpperCAmelCase_= create_components()
accelerator.prepare(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase )
accelerator.free_memory()
self.assertTrue(len(accelerator._models ) == 0 )
self.assertTrue(len(accelerator._optimizers ) == 0 )
self.assertTrue(len(accelerator._schedulers ) == 0 )
self.assertTrue(len(accelerator._dataloaders ) == 0 )
def _SCREAMING_SNAKE_CASE ( self : Dict ) -> Optional[Any]:
PartialState._reset_state()
# Mock torch.cuda.set_device to avoid an exception as the device doesn't exist
def noop(*__UpperCAmelCase : Dict , **__UpperCAmelCase : Tuple ):
pass
with patch("""torch.cuda.set_device""" , __UpperCAmelCase ), patch_environment(ACCELERATE_TORCH_DEVICE="""cuda:64""" ):
UpperCAmelCase_= Accelerator()
self.assertEqual(str(accelerator.state.device ) , """cuda:64""" )
def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> List[Any]:
UpperCAmelCase_= Accelerator()
UpperCAmelCase_, UpperCAmelCase_, UpperCAmelCase_, UpperCAmelCase_, UpperCAmelCase_= create_components()
accelerator.prepare(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase )
UpperCAmelCase_= get_signature(__UpperCAmelCase )
with tempfile.TemporaryDirectory() as tmpdirname:
accelerator.save_state(__UpperCAmelCase )
# make sure random weights don't match
load_random_weights(__UpperCAmelCase )
self.assertTrue(abs(model_signature - get_signature(__UpperCAmelCase ) ) > 1E-3 )
# make sure loaded weights match
accelerator.load_state(__UpperCAmelCase )
self.assertTrue(abs(model_signature - get_signature(__UpperCAmelCase ) ) < 1E-3 )
    def test_save_load_model_with_hooks(self):
        accelerator = Accelerator()
        model, optimizer, scheduler, train_dl, valid_dl = create_components()
        accelerator.prepare(model, optimizer, scheduler, train_dl, valid_dl)

        model_signature = get_signature(model)

        # saving hook
        def save_config(models, weights, output_dir):
            config = {"class_name": models[0].__class__.__name__}

            with open(os.path.join(output_dir, "data.json"), "w") as f:
                json.dump(config, f)

        # loading hook
        def load_config(models, input_dir):
            with open(os.path.join(input_dir, "data.json"), "r") as f:
                config = json.load(f)

            models[0].class_name = config["class_name"]

        save_hook = accelerator.register_save_state_pre_hook(save_config)
        load_hook = accelerator.register_load_state_pre_hook(load_config)

        with tempfile.TemporaryDirectory() as tmpdirname:
            accelerator.save_state(tmpdirname)

            # make sure random weights don't match with hooks
            load_random_weights(model)
            self.assertTrue(abs(model_signature - get_signature(model)) > 1e-3)

            # random class name to verify correct one is loaded
            model.class_name = "random"

            # make sure loaded weights match with hooks
            accelerator.load_state(tmpdirname)
            self.assertTrue(abs(model_signature - get_signature(model)) < 1e-3)

            # model.class_name is loaded from config
            self.assertTrue(model.class_name == model.__class__.__name__)

        # remove hooks
        save_hook.remove()
        load_hook.remove()

        with tempfile.TemporaryDirectory() as tmpdirname:
            accelerator.save_state(tmpdirname)

            # make sure random weights don't match with hooks removed
            load_random_weights(model)
            self.assertTrue(abs(model_signature - get_signature(model)) > 1e-3)

            # random class name to verify correct one is loaded
            model.class_name = "random"

            # make sure loaded weights match with hooks removed
            accelerator.load_state(tmpdirname)
            self.assertTrue(abs(model_signature - get_signature(model)) < 1e-3)

            # model.class_name is NOT loaded from config
            self.assertTrue(model.class_name != model.__class__.__name__)
    def test_accelerator_none(self):
        """Just test that passing None to accelerator.prepare() works."""
        accelerator = Accelerator()
        model, optimizer, scheduler, train_dl, valid_dl = create_components()
        dummy_obj = None

        # This should work
        model, optimizer, scheduler, train_dl, valid_dl, dummy_obj = accelerator.prepare(
            model, optimizer, scheduler, train_dl, valid_dl, dummy_obj
        )
        self.assertTrue(dummy_obj is None)

    def test_is_accelerator_prepared(self):
        """Checks that `_is_accelerate_prepared` is set properly on prepared objects."""
        accelerator = Accelerator()
        model, optimizer, scheduler, train_dl, valid_dl = create_components()
        dummy_obj = [1, 2, 3]

        # This should work
        model, optimizer, scheduler, train_dl, valid_dl, dummy_obj = accelerator.prepare(
            model, optimizer, scheduler, train_dl, valid_dl, dummy_obj
        )
        self.assertEqual(
            getattr(dummy_obj, "_is_accelerate_prepared", False),
            False,
            "Dummy object should not have `_is_accelerate_prepared` set",
        )
        self.assertEqual(
            getattr(model, "_is_accelerate_prepared", False),
            True,
            "Model is missing `_is_accelerate_prepared` or is set to `False`",
        )
        self.assertEqual(
            getattr(optimizer, "_is_accelerate_prepared", False),
            True,
            "Optimizer is missing `_is_accelerate_prepared` or is set to `False`",
        )
        self.assertEqual(
            getattr(scheduler, "_is_accelerate_prepared", False),
            True,
            "Scheduler is missing `_is_accelerate_prepared` or is set to `False`",
        )
        self.assertEqual(
            getattr(train_dl, "_is_accelerate_prepared", False),
            True,
            "Train Dataloader is missing `_is_accelerate_prepared` or is set to `False`",
        )
        self.assertEqual(
            getattr(valid_dl, "_is_accelerate_prepared", False),
            True,
            "Valid Dataloader is missing `_is_accelerate_prepared` or is set to `False`",
        )
    @slow
    @require_bnb
    def test_accelerator_bnb(self):
        """Tests that the accelerator can be used with an 8-bit bitsandbytes model."""
        from transformers import AutoModelForCausalLM

        model = AutoModelForCausalLM.from_pretrained(
            "EleutherAI/gpt-neo-125m",
            load_in_8bit=True,
            device_map={"": 0},
        )
        accelerator = Accelerator()

        # This should work
        model = accelerator.prepare(model)

    @slow
    @require_bnb
    def test_accelerator_bnb_cpu_error(self):
        """Tests that preparing an 8-bit model dispatched between CPU and GPU raises an error."""
        from transformers import AutoModelForCausalLM

        accelerator = Accelerator()
        with init_empty_weights():
            model = AutoModelForCausalLM.from_pretrained(
                "EleutherAI/gpt-neo-125m",
            )
        model.tie_weights()
        device_map = infer_auto_device_map(model)
        device_map["lm_head"] = "cpu"
        model = AutoModelForCausalLM.from_pretrained(
            "EleutherAI/gpt-neo-125m",
            device_map=device_map,
            load_in_8bit=True,
            llm_int8_enable_fp32_cpu_offload=True,
        )

        # This should not work and get value error
        with self.assertRaises(ValueError):
            model = accelerator.prepare(model)
    @slow
    @require_bnb
    @require_multi_gpu
    def test_accelerator_bnb_multi_device(self):
        """Tests that preparing an 8-bit model split across GPUs raises an error in distributed mode."""
        from transformers import AutoModelForCausalLM

        PartialState._shared_state = {"distributed_type": DistributedType.MULTI_GPU}

        with init_empty_weights():
            model = AutoModelForCausalLM.from_pretrained(
                "EleutherAI/gpt-neo-125m",
            )
        model.tie_weights()
        device_map = infer_auto_device_map(model)
        device_map["lm_head"] = 1
        model = AutoModelForCausalLM.from_pretrained(
            "EleutherAI/gpt-neo-125m",
            load_in_8bit=True,
            device_map=device_map,
        )
        accelerator = Accelerator()

        # This should not work and get value error
        with self.assertRaises(ValueError):
            _ = accelerator.prepare(model)

        PartialState._reset_state()

    @slow
    @require_bnb
    @require_multi_gpu
    def test_accelerator_bnb_multi_device_no_distributed(self):
        """Tests that an 8-bit model split across devices can be prepared outside distributed mode."""
        from transformers import AutoModelForCausalLM

        with init_empty_weights():
            model = AutoModelForCausalLM.from_pretrained(
                "EleutherAI/gpt-neo-125m",
            )
        device_map = infer_auto_device_map(model)
        device_map["lm_head"] = 1
        model = AutoModelForCausalLM.from_pretrained(
            "EleutherAI/gpt-neo-125m",
            load_in_8bit=True,
            device_map=device_map,
        )
        accelerator = Accelerator()

        # This should work
        model = accelerator.prepare(model)
    @require_cuda
    def test_accelerator_cpu_flag_prepare(self):
        model = torch.nn.Linear(10, 10)
        sgd = torch.optim.SGD(model.parameters(), lr=0.01)
        accelerator = Accelerator(cpu=True)
        _ = accelerator.prepare(sgd)
| 277
| 0
|
from __future__ import annotations
def longest_subsequence(array: list[int]) -> list[int]:  # This function is recursive
    """Return the longest increasing subsequence of `array`."""
    array_length = len(array)
    # If the array contains only one element, we return it (it's the stop condition of
    # recursion)
    if array_length <= 1:
        return array
    # Else
    pivot = array[0]
    is_found = False
    i = 1
    longest_subseq: list[int] = []
    while not is_found and i < array_length:
        if array[i] < pivot:
            is_found = True
            temp_array = [element for element in array[i:] if element >= array[i]]
            temp_array = longest_subsequence(temp_array)
            if len(temp_array) > len(longest_subseq):
                longest_subseq = temp_array
        else:
            i += 1

    temp_array = [element for element in array[1:] if element >= pivot]
    temp_array = [pivot, *longest_subsequence(temp_array)]
    if len(temp_array) > len(longest_subseq):
        return temp_array
    else:
        return longest_subseq


if __name__ == "__main__":
    import doctest

    doctest.testmod()
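    # Illustrative run (a sketch; the expected output assumes the reconstruction
    # above is faithful to the usual recursive LIS algorithm):
    print(longest_subsequence([4, 8, 7, 5, 1, 12, 2, 3, 9]))  # -> [1, 2, 3, 9]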
| 128
|
from typing import List, Optional, Union
import numpy as np
import PIL
import torch
from PIL import Image
from ...models import UNet2DConditionModel, VQModel
from ...pipelines import DiffusionPipeline
from ...pipelines.pipeline_utils import ImagePipelineOutput
from ...schedulers import DDPMScheduler
from ...utils import (
is_accelerate_available,
is_accelerate_version,
logging,
randn_tensor,
replace_example_docstring,
)
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name
EXAMPLE_DOC_STRING = """
Examples:
```py
>>> from diffusers import KandinskyV22Img2ImgPipeline, KandinskyV22PriorPipeline
>>> from diffusers.utils import load_image
>>> import torch
>>> pipe_prior = KandinskyV22PriorPipeline.from_pretrained(
... \"kandinsky-community/kandinsky-2-2-prior\", torch_dtype=torch.float16
... )
>>> pipe_prior.to(\"cuda\")
>>> prompt = \"A red cartoon frog, 4k\"
>>> image_emb, zero_image_emb = pipe_prior(prompt, return_dict=False)
>>> pipe = KandinskyV22Img2ImgPipeline.from_pretrained(
... \"kandinsky-community/kandinsky-2-2-decoder\", torch_dtype=torch.float16
... )
>>> pipe.to(\"cuda\")
>>> init_image = load_image(
... \"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main\"
... \"/kandinsky/frog.png\"
... )
>>> image = pipe(
... image=init_image,
... image_embeds=image_emb,
... negative_image_embeds=zero_image_emb,
... height=768,
... width=768,
... num_inference_steps=100,
... strength=0.2,
... ).images
>>> image[0].save(\"red_frog.png\")
```
"""
def downscale_height_and_width(height, width, scale_factor=8):
    new_height = height // scale_factor**2
    if height % scale_factor**2 != 0:
        new_height += 1
    new_width = width // scale_factor**2
    if width % scale_factor**2 != 0:
        new_width += 1
    return new_height * scale_factor, new_width * scale_factor
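# Worked example (illustrative): with the default scale_factor of 8,
# downscale_height_and_width(768, 768) returns (96, 96) -- the size is reduced
# to height // scale_factor**2 cells and then re-expanded by scale_factor.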
def prepare_image(pil_image, w=512, h=512):
    pil_image = pil_image.resize((w, h), resample=Image.BICUBIC, reducing_gap=1)
    arr = np.array(pil_image.convert("RGB"))
    arr = arr.astype(np.float32) / 127.5 - 1
    arr = np.transpose(arr, [2, 0, 1])
    image = torch.from_numpy(arr).unsqueeze(0)
    return image
class KandinskyV22Img2ImgPipeline(DiffusionPipeline):
    """Pipeline for image-to-image generation using Kandinsky 2.2."""

    def __init__(
        self,
        unet: UNet2DConditionModel,
        scheduler: DDPMScheduler,
        movq: VQModel,
    ):
        super().__init__()

        self.register_modules(
            unet=unet,
            scheduler=scheduler,
            movq=movq,
        )
        self.movq_scale_factor = 2 ** (len(self.movq.config.block_out_channels) - 1)

    def get_timesteps(self, num_inference_steps, strength, device):
        # get the original timestep using init_timestep
        init_timestep = min(int(num_inference_steps * strength), num_inference_steps)

        t_start = max(num_inference_steps - init_timestep, 0)
        timesteps = self.scheduler.timesteps[t_start:]

        return timesteps, num_inference_steps - t_start
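    # Worked example (illustrative): with num_inference_steps=100 and strength=0.2,
    # init_timestep = 20 and t_start = 80, so only the last 20 scheduler timesteps
    # are run -- lower strength preserves more of the input image.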
    def prepare_latents(self, image, timestep, batch_size, num_images_per_prompt, dtype, device, generator=None):
        if not isinstance(image, (torch.Tensor, PIL.Image.Image, list)):
            raise ValueError(
                f"`image` has to be of type `torch.Tensor`, `PIL.Image.Image` or list but is {type(image)}"
            )

        image = image.to(device=device, dtype=dtype)

        batch_size = batch_size * num_images_per_prompt

        if image.shape[1] == 4:
            init_latents = image
        else:
            if isinstance(generator, list) and len(generator) != batch_size:
                raise ValueError(
                    f"You have passed a list of generators of length {len(generator)}, but requested an effective batch"
                    f" size of {batch_size}. Make sure the batch size matches the length of the generators."
                )
            elif isinstance(generator, list):
                init_latents = [
                    self.movq.encode(image[i : i + 1]).latent_dist.sample(generator[i]) for i in range(batch_size)
                ]
                init_latents = torch.cat(init_latents, dim=0)
            else:
                init_latents = self.movq.encode(image).latent_dist.sample(generator)

            init_latents = self.movq.config.scaling_factor * init_latents

        init_latents = torch.cat([init_latents], dim=0)

        shape = init_latents.shape
        noise = randn_tensor(shape, generator=generator, device=device, dtype=dtype)

        # get latents
        init_latents = self.scheduler.add_noise(init_latents, noise, timestep)
        latents = init_latents

        return latents

    def enable_sequential_cpu_offload(self, gpu_id=0):
        if is_accelerate_available():
            from accelerate import cpu_offload
        else:
            raise ImportError("Please install accelerate via `pip install accelerate`")

        device = torch.device(f"cuda:{gpu_id}")

        models = [
            self.unet,
            self.movq,
        ]
        for cpu_offloaded_model in models:
            if cpu_offloaded_model is not None:
                cpu_offload(cpu_offloaded_model, device)

    def enable_model_cpu_offload(self, gpu_id=0):
        if is_accelerate_available() and is_accelerate_version(">=", "0.17.0.dev0"):
            from accelerate import cpu_offload_with_hook
        else:
            raise ImportError("`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher.")

        device = torch.device(f"cuda:{gpu_id}")

        if self.device.type != "cpu":
            self.to("cpu", silence_dtype_warnings=True)
            torch.cuda.empty_cache()  # otherwise we don't see the memory savings (but they probably exist)

        hook = None
        for cpu_offloaded_model in [self.unet, self.movq]:
            _, hook = cpu_offload_with_hook(cpu_offloaded_model, device, prev_module_hook=hook)

        # We'll offload the last model manually.
        self.final_offload_hook = hook

    @property
    # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
    def _execution_device(self):
        if not hasattr(self.unet, "_hf_hook"):
            return self.device
        for module in self.unet.modules():
            if (
                hasattr(module, "_hf_hook")
                and hasattr(module._hf_hook, "execution_device")
                and module._hf_hook.execution_device is not None
            ):
                return torch.device(module._hf_hook.execution_device)
        return self.device
    @torch.no_grad()
    @replace_example_docstring(EXAMPLE_DOC_STRING)
    def __call__(
        self,
        image_embeds,
        image,
        negative_image_embeds,
        height=512,
        width=512,
        num_inference_steps=100,
        guidance_scale=4.0,
        strength=0.3,
        num_images_per_prompt=1,
        generator=None,
        output_type="pil",
        return_dict=True,
    ):
        device = self._execution_device

        do_classifier_free_guidance = guidance_scale > 1.0

        if isinstance(image_embeds, list):
            image_embeds = torch.cat(image_embeds, dim=0)
        batch_size = image_embeds.shape[0]
        if isinstance(negative_image_embeds, list):
            negative_image_embeds = torch.cat(negative_image_embeds, dim=0)

        if do_classifier_free_guidance:
            image_embeds = image_embeds.repeat_interleave(num_images_per_prompt, dim=0)
            negative_image_embeds = negative_image_embeds.repeat_interleave(num_images_per_prompt, dim=0)

            image_embeds = torch.cat([negative_image_embeds, image_embeds], dim=0).to(
                dtype=self.unet.dtype, device=device
            )

        if not isinstance(image, list):
            image = [image]
        if not all(isinstance(i, (PIL.Image.Image, torch.Tensor)) for i in image):
            raise ValueError(
                f"Input is in incorrect format: {[type(i) for i in image]}. Currently, we only support PIL image and pytorch tensor"
            )

        image = torch.cat([prepare_image(i, width, height) for i in image], dim=0)
        image = image.to(dtype=image_embeds.dtype, device=device)

        latents = self.movq.encode(image)["latents"]
        latents = latents.repeat_interleave(num_images_per_prompt, dim=0)
        self.scheduler.set_timesteps(num_inference_steps, device=device)
        timesteps, num_inference_steps = self.get_timesteps(num_inference_steps, strength, device)
        latent_timestep = timesteps[:1].repeat(batch_size * num_images_per_prompt)
        height, width = downscale_height_and_width(height, width, self.movq_scale_factor)
        latents = self.prepare_latents(
            latents, latent_timestep, batch_size, num_images_per_prompt, image_embeds.dtype, device, generator
        )

        for i, t in enumerate(self.progress_bar(timesteps)):
            # expand the latents if we are doing classifier free guidance
            latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents

            added_cond_kwargs = {"image_embeds": image_embeds}
            noise_pred = self.unet(
                sample=latent_model_input,
                timestep=t,
                encoder_hidden_states=None,
                added_cond_kwargs=added_cond_kwargs,
                return_dict=False,
            )[0]

            if do_classifier_free_guidance:
                noise_pred, variance_pred = noise_pred.split(latents.shape[1], dim=1)
                noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
                _, variance_pred_text = variance_pred.chunk(2)
                noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
                noise_pred = torch.cat([noise_pred, variance_pred_text], dim=1)

            if not (
                hasattr(self.scheduler.config, "variance_type")
                and self.scheduler.config.variance_type in ["learned", "learned_range"]
            ):
                noise_pred, _ = noise_pred.split(latents.shape[1], dim=1)

            # compute the previous noisy sample x_t -> x_t-1
            latents = self.scheduler.step(noise_pred, t, latents, generator=generator)[0]

        # post-processing
        image = self.movq.decode(latents, force_not_quantize=True)["sample"]

        if output_type not in ["pt", "np", "pil"]:
            raise ValueError(f"Only the output types `pt`, `pil` and `np` are supported not output_type={output_type}")

        if output_type in ["np", "pil"]:
            image = image * 0.5 + 0.5
            image = image.clamp(0, 1)
            image = image.cpu().permute(0, 2, 3, 1).float().numpy()

        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return (image,)

        return ImagePipelineOutput(images=image)
| 128
| 1
|
'''simple docstring'''
from __future__ import annotations
def bucket_sort(my_list: list) -> list:
    """Sort a list of numbers by distributing them into per-value buckets."""
    if len(my_list) == 0:
        return []
    min_value, max_value = min(my_list), max(my_list)
    bucket_count = int(max_value - min_value) + 1
    buckets: list[list] = [[] for _ in range(bucket_count)]

    for i in my_list:
        buckets[int(i - min_value)].append(i)

    return [v for bucket in buckets for v in sorted(bucket)]


if __name__ == "__main__":
    from doctest import testmod

    testmod()
    assert bucket_sort([4, 5, 3, 2, 1]) == [1, 2, 3, 4, 5]
    assert bucket_sort([0, 1, -10, 15, 2, -2]) == [-10, -2, 0, 1, 2, 15]
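    # One more illustrative case: duplicates land in the same bucket and are
    # sorted within it, so order is preserved among equal keys.
    assert bucket_sort([5, 5, 3, 3]) == [3, 3, 5, 5]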
| 350
|
'''simple docstring'''
import multiprocessing
from typing import TYPE_CHECKING, Optional, Union
from .. import Dataset, Features, config
from ..formatting import query_table
from ..packaged_modules.sql.sql import Sql
from ..utils import logging
from .abc import AbstractDatasetInputStream
if TYPE_CHECKING:
    import sqlite3
import sqlalchemy
class SqlDatasetReader(AbstractDatasetInputStream):
    def __init__(self, sql, con, features=None, cache_dir=None, keep_in_memory=False, **kwargs):
        super().__init__(features=features, cache_dir=cache_dir, keep_in_memory=keep_in_memory, **kwargs)
        self.builder = Sql(
            cache_dir=cache_dir,
            features=features,
            sql=sql,
            con=con,
            **kwargs,
        )

    def read(self):
        download_config = None
        download_mode = None
        verification_mode = None
        base_path = None

        self.builder.download_and_prepare(
            download_config=download_config,
            download_mode=download_mode,
            verification_mode=verification_mode,
            base_path=base_path,
        )

        # Build dataset for splits
        dataset = self.builder.as_dataset(
            split="train", verification_mode=verification_mode, in_memory=self.keep_in_memory
        )
        return dataset
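# A minimal usage sketch (the table name and connection string are hypothetical,
# not from the original file):
#   dataset = SqlDatasetReader("SELECT * FROM my_table", "sqlite:///my.db").read()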
class SqlDatasetWriter:
    def __init__(self, dataset, name, con, batch_size=None, num_proc=None, **to_sql_kwargs):
        if num_proc is not None and num_proc <= 0:
            raise ValueError(f"num_proc {num_proc} must be an integer > 0.")

        self.dataset = dataset
        self.name = name
        self.con = con
        self.batch_size = batch_size if batch_size else config.DEFAULT_MAX_BATCH_SIZE
        self.num_proc = num_proc
        self.to_sql_kwargs = to_sql_kwargs

    def write(self):
        _ = self.to_sql_kwargs.pop("sql", None)
        _ = self.to_sql_kwargs.pop("con", None)
        index = self.to_sql_kwargs.pop("index", False)

        written = self._write(index=index, **self.to_sql_kwargs)
        return written

    def _batch_sql(self, args):
        offset, index, to_sql_kwargs = args
        to_sql_kwargs = {**to_sql_kwargs, "if_exists": "append"} if offset > 0 else to_sql_kwargs
        batch = query_table(
            table=self.dataset.data,
            key=slice(offset, offset + self.batch_size),
            indices=self.dataset._indices,
        )
        df = batch.to_pandas()
        num_rows = df.to_sql(self.name, self.con, index=index, **to_sql_kwargs)
        return num_rows or len(df)

    def _write(self, index, **to_sql_kwargs):
        written = 0

        if self.num_proc is None or self.num_proc == 1:
            for offset in logging.tqdm(
                range(0, len(self.dataset), self.batch_size),
                unit="ba",
                disable=not logging.is_progress_bar_enabled(),
                desc="Creating SQL from Arrow format",
            ):
                written += self._batch_sql((offset, index, to_sql_kwargs))
        else:
            num_rows, batch_size = len(self.dataset), self.batch_size
            with multiprocessing.Pool(self.num_proc) as pool:
                for num_rows in logging.tqdm(
                    pool.imap(
                        self._batch_sql,
                        [(offset, index, to_sql_kwargs) for offset in range(0, num_rows, batch_size)],
                    ),
                    total=(num_rows // batch_size) + 1 if num_rows % batch_size else num_rows // batch_size,
                    unit="ba",
                    disable=not logging.is_progress_bar_enabled(),
                    desc="Creating SQL from Arrow format",
                ):
                    written += num_rows

        return written
| 338
| 0
|
'''simple docstring'''
import os
from shutil import copyfile
from typing import List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.model"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "google/rembert": "https://huggingface.co/google/rembert/resolve/main/sentencepiece.model",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "google/rembert": 256,
}
class RemBertTokenizer(PreTrainedTokenizer):
    """Construct a RemBERT tokenizer backed by SentencePiece."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    def __init__(
        self,
        vocab_file,
        do_lower_case=False,
        remove_space=True,
        keep_accents=True,
        bos_token="[CLS]",
        eos_token="[SEP]",
        unk_token="[UNK]",
        sep_token="[SEP]",
        pad_token="[PAD]",
        cls_token="[CLS]",
        mask_token="[MASK]",
        **kwargs,
    ):
        super().__init__(
            do_lower_case=do_lower_case,
            remove_space=remove_space,
            keep_accents=keep_accents,
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            **kwargs,
        )

        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.keep_accents = keep_accents
        self.vocab_file = vocab_file

        self.sp_model = spm.SentencePieceProcessor()
        self.sp_model.Load(vocab_file)

    @property
    def vocab_size(self):
        return len(self.sp_model)

    def get_vocab(self):
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab
    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d):
        self.__dict__ = d
        self.sp_model = spm.SentencePieceProcessor()
        self.sp_model.Load(self.vocab_file)

    def _tokenize(self, text, sample=False):
        pieces = self.sp_model.EncodeAsPieces(text)
        return pieces

    def _convert_token_to_id(self, token):
        return self.sp_model.PieceToId(token)

    def _convert_id_to_token(self, index):
        return self.sp_model.IdToPiece(index)

    def convert_tokens_to_string(self, tokens):
        out_string = self.sp_model.decode_pieces(tokens)
        return out_string
    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return cls + token_ids_0 + sep
        return cls + token_ids_0 + sep + token_ids_1 + sep

    def get_special_tokens_mask(self, token_ids_0, token_ids_1=None, already_has_special_tokens=False):
        if already_has_special_tokens:
            if token_ids_1 is not None:
                raise ValueError(
                    "You should not supply a second sequence if the provided sequence of "
                    "ids is already formatted with special tokens for the model."
                )
            return [1 if x in [self.sep_token_id, self.cls_token_id] else 0 for x in token_ids_0]

        if token_ids_1 is not None:
            return [1] + ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1]
    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]
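    # Illustrative layout (not from the original file): for a sequence pair the
    # token type ids line up as
    #   [CLS] seq0 [SEP] seq1 [SEP]
    #    0    0..0  0    1..1  1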
    def save_vocabulary(self, save_directory, filename_prefix=None):
        if not os.path.isdir(save_directory):
            logger.error("Vocabulary path ({}) should be a directory".format(save_directory))
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)

        return (out_vocab_file,)
| 55
|
'''simple docstring'''
from typing import List, Optional, Tuple, Union
import torch
from ...models import UNet2DModel
from ...schedulers import ScoreSdeVeScheduler
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
class ScoreSdeVePipeline(DiffusionPipeline):
    """Unconditional image generation pipeline using a score-based (VE) SDE scheduler."""

    unet: UNet2DModel
    scheduler: ScoreSdeVeScheduler

    def __init__(self, unet: UNet2DModel, scheduler: ScoreSdeVeScheduler):
        super().__init__()
        self.register_modules(unet=unet, scheduler=scheduler)

    @torch.no_grad()
    def __call__(
        self,
        batch_size: int = 1,
        num_inference_steps: int = 2000,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
        **kwargs,
    ):
        img_size = self.unet.config.sample_size
        shape = (batch_size, 3, img_size, img_size)

        model = self.unet

        sample = randn_tensor(shape, generator=generator) * self.scheduler.init_noise_sigma
        sample = sample.to(self.device)

        self.scheduler.set_timesteps(num_inference_steps)
        self.scheduler.set_sigmas(num_inference_steps)

        for i, t in enumerate(self.progress_bar(self.scheduler.timesteps)):
            sigma_t = self.scheduler.sigmas[i] * torch.ones(shape[0], device=self.device)

            # correction step
            for _ in range(self.scheduler.config.correct_steps):
                model_output = self.unet(sample, sigma_t).sample
                sample = self.scheduler.step_correct(model_output, sample, generator=generator).prev_sample

            # prediction step
            model_output = model(sample, sigma_t).sample
            output = self.scheduler.step_pred(model_output, t, sample, generator=generator)

            sample, sample_mean = output.prev_sample, output.prev_sample_mean

        sample = sample_mean.clamp(0, 1)
        sample = sample.cpu().permute(0, 2, 3, 1).numpy()
        if output_type == "pil":
            sample = self.numpy_to_pil(sample)

        if not return_dict:
            return (sample,)

        return ImagePipelineOutput(images=sample)
| 55
| 1
|
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

MOBILENET_V1_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "google/mobilenet_v1_1.0_224": "https://huggingface.co/google/mobilenet_v1_1.0_224/resolve/main/config.json",
    "google/mobilenet_v1_0.75_192": "https://huggingface.co/google/mobilenet_v1_0.75_192/resolve/main/config.json",
    # See all MobileNetV1 models at https://huggingface.co/models?filter=mobilenet_v1
}


class MobileNetV1Config(PretrainedConfig):
    model_type = "mobilenet_v1"

    def __init__(
        self,
        num_channels=3,
        image_size=224,
        depth_multiplier=1.0,
        min_depth=8,
        hidden_act="relu6",
        tf_padding=True,
        classifier_dropout_prob=0.999,
        initializer_range=0.02,
        layer_norm_eps=0.001,
        **kwargs,
    ):
        super().__init__(**kwargs)

        if depth_multiplier <= 0:
            raise ValueError("depth_multiplier must be greater than zero.")

        self.num_channels = num_channels
        self.image_size = image_size
        self.depth_multiplier = depth_multiplier
        self.min_depth = min_depth
        self.hidden_act = hidden_act
        self.tf_padding = tf_padding
        self.classifier_dropout_prob = classifier_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
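# A minimal usage sketch (illustrative values, not from the original file):
#   config = MobileNetV1Config(depth_multiplier=0.75, image_size=192)
#   config.depth_multiplier  # -> 0.75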
class MobileNetV1OnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict([("pixel_values", {0: "batch"})])

    @property
    def outputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "image-classification":
            return OrderedDict([("logits", {0: "batch"})])
        else:
            return OrderedDict([("last_hidden_state", {0: "batch"}), ("pooler_output", {0: "batch"})])

    @property
    def atol_for_validation(self) -> float:
        return 1e-4
| 120
|
from __future__ import annotations
sieve = [True] * 1_000_001
i = 2
while i * i <= 1_000_000:
    if sieve[i]:
        for j in range(i * i, 1_000_001, i):
            sieve[j] = False
    i += 1


def is_prime(n: int) -> bool:
    """Return True if n is prime (valid for n <= 1_000_000)."""
    return sieve[n]


def contains_an_even_digit(n: int) -> bool:
    """Return True if n contains an even digit."""
    return any(digit in "02468" for digit in str(n))


def find_circular_primes(limit: int = 1_000_000) -> list[int]:
    """Return the circular primes below limit."""
    result = [2]  # result already includes the number 2.
    for num in range(3, limit + 1, 2):
        if is_prime(num) and not contains_an_even_digit(num):
            str_num = str(num)
            list_nums = [int(str_num[j:] + str_num[:j]) for j in range(len(str_num))]
            if all(is_prime(i) for i in list_nums):
                result.append(num)
    return result
def solution() -> int:
    """Return the count of circular primes below one million."""
    return len(find_circular_primes())
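# Worked example (illustrative): 197 is a circular prime because every rotation
# (197, 971, 719) is prime, so it appears in find_circular_primes().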
if __name__ == "__main__":
    print(f"{len(find_circular_primes()) = }")
| 120
| 1
|
import warnings
from typing import List
import numpy as np
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
from ...utils import is_flax_available, is_tf_available, is_torch_available
class OwlViTProcessor(ProcessorMixin):
    """Wraps an OwlViT image processor and a CLIP tokenizer into a single processor."""

    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "OwlViTImageProcessor"
    tokenizer_class = ("CLIPTokenizer", "CLIPTokenizerFast")

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")

        super().__init__(image_processor, tokenizer)
    def __call__(self, text=None, query_images=None, images=None, padding="max_length", return_tensors="np", **kwargs):
        if text is None and query_images is None and images is None:
            raise ValueError(
                "You have to specify at least one text or query image or image. All three cannot be none."
            )

        if text is not None:
            if isinstance(text, str) or (isinstance(text, List) and not isinstance(text[0], List)):
                encodings = [self.tokenizer(text, padding=padding, return_tensors=return_tensors, **kwargs)]
            elif isinstance(text, List) and isinstance(text[0], List):
                encodings = []

                # Maximum number of queries across batch
                max_num_queries = max([len(t) for t in text])

                # Pad all batch samples to max number of text queries
                for t in text:
                    if len(t) != max_num_queries:
                        t = t + [" "] * (max_num_queries - len(t))

                    encoding = self.tokenizer(t, padding=padding, return_tensors=return_tensors, **kwargs)
                    encodings.append(encoding)
            else:
                raise TypeError("Input text should be a string, a list of strings or a nested list of strings")

            if return_tensors == "np":
                input_ids = np.concatenate([encoding["input_ids"] for encoding in encodings], axis=0)
                attention_mask = np.concatenate([encoding["attention_mask"] for encoding in encodings], axis=0)
            elif return_tensors == "jax" and is_flax_available():
                import jax.numpy as jnp

                input_ids = jnp.concatenate([encoding["input_ids"] for encoding in encodings], axis=0)
                attention_mask = jnp.concatenate([encoding["attention_mask"] for encoding in encodings], axis=0)
            elif return_tensors == "pt" and is_torch_available():
                import torch

                input_ids = torch.cat([encoding["input_ids"] for encoding in encodings], dim=0)
                attention_mask = torch.cat([encoding["attention_mask"] for encoding in encodings], dim=0)
            elif return_tensors == "tf" and is_tf_available():
                import tensorflow as tf

                input_ids = tf.stack([encoding["input_ids"] for encoding in encodings], axis=0)
                attention_mask = tf.stack([encoding["attention_mask"] for encoding in encodings], axis=0)
            else:
                raise ValueError("Target return tensor type could not be returned")

            encoding = BatchEncoding()
            encoding["input_ids"] = input_ids
            encoding["attention_mask"] = attention_mask

        if query_images is not None:
            encoding = BatchEncoding()
            query_pixel_values = self.image_processor(
                query_images, return_tensors=return_tensors, **kwargs
            ).pixel_values
            encoding["query_pixel_values"] = query_pixel_values

        if images is not None:
            image_features = self.image_processor(images, return_tensors=return_tensors, **kwargs)

        if text is not None and images is not None:
            encoding["pixel_values"] = image_features.pixel_values
            return encoding
        elif query_images is not None and images is not None:
            encoding["pixel_values"] = image_features.pixel_values
            return encoding
        elif text is not None or query_images is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features), tensor_type=return_tensors)
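    # A minimal usage sketch (hypothetical inputs, not from the original file):
    #   inputs = processor(text=[["a photo of a cat", "a photo of a dog"]],
    #                      images=image, return_tensors="pt")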
    def post_process(self, *args, **kwargs):
        return self.image_processor.post_process(*args, **kwargs)

    def post_process_object_detection(self, *args, **kwargs):
        return self.image_processor.post_process_object_detection(*args, **kwargs)

    def post_process_image_guided_detection(self, *args, **kwargs):
        return self.image_processor.post_process_image_guided_detection(*args, **kwargs)

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def feature_extractor_class(self):
        warnings.warn(
            "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.",
            FutureWarning,
        )
        return self.image_processor_class

    @property
    def feature_extractor(self):
        warnings.warn(
            "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.",
            FutureWarning,
        )
        return self.image_processor
| 118
|
import math
from datetime import datetime, timedelta
def gauss_easter(year):
    """Calculate the Easter date for a given year using Gauss's algorithm."""
    metonic_cycle = year % 19
    julian_leap_year = year % 4
    non_leap_year = year % 7
    leap_day_inhibits = math.floor(year / 100)
    lunar_orbit_correction = math.floor((13 + 8 * leap_day_inhibits) / 25)
    leap_day_reinstall_number = leap_day_inhibits / 4
    secular_moon_shift = (
        15 - lunar_orbit_correction + leap_day_inhibits - leap_day_reinstall_number
    ) % 30
    century_starting_point = (4 + leap_day_inhibits - leap_day_reinstall_number) % 7

    # days to be added to March 21
    days_to_add = (19 * metonic_cycle + secular_moon_shift) % 30

    # PHM -> Paschal Full Moon
    days_from_phm_to_sunday = (
        2 * julian_leap_year
        + 4 * non_leap_year
        + 6 * days_to_add
        + century_starting_point
    ) % 7

    if days_to_add == 29 and days_from_phm_to_sunday == 6:
        return datetime(year, 4, 19)
    elif days_to_add == 28 and days_from_phm_to_sunday == 6:
        return datetime(year, 4, 18)
    else:
        return datetime(year, 3, 22) + timedelta(days=int(days_to_add + days_from_phm_to_sunday))


if __name__ == "__main__":
    for year in (1994, 2000, 2010, 2021, 2023):
        tense = "will be" if year > datetime.now().year else "was"
        print(f"Easter in {year} {tense} {gauss_easter(year)}")
| 118
| 1
|
import json
import os
from dataclasses import dataclass
from functools import partial
from typing import Callable
import flax.linen as nn
import jax
import jax.numpy as jnp
import joblib
import optax
import wandb
from flax import jax_utils, struct, traverse_util
from flax.serialization import from_bytes, to_bytes
from flax.training import train_state
from flax.training.common_utils import shard
from tqdm.auto import tqdm
from transformers import BigBirdConfig, FlaxBigBirdForQuestionAnswering
from transformers.models.big_bird.modeling_flax_big_bird import FlaxBigBirdForQuestionAnsweringModule
class FlaxBigBirdForNaturalQuestionsModule(FlaxBigBirdForQuestionAnsweringModule):
    """
    BigBirdForQuestionAnswering with a CLS head on top for predicting the answer
    category, so it can be loaded from FlaxBigBirdForQuestionAnswering weights.
    """

    config: BigBirdConfig
    dtype: jnp.dtype = jnp.float32
    add_pooling_layer: bool = True

    def setup(self):
        super().setup()
        self.cls = nn.Dense(5, dtype=self.dtype)

    def __call__(self, *args, **kwargs):
        outputs = super().__call__(*args, **kwargs)
        cls_out = self.cls(outputs[2])
        return outputs[:2] + (cls_out,)


class FlaxBigBirdForNaturalQuestions(FlaxBigBirdForQuestionAnswering):
    module_class = FlaxBigBirdForNaturalQuestionsModule
def calculate_loss_for_nq(start_logits, start_labels, end_logits, end_labels, pooled_logits, pooler_labels):
    def cross_entropy(logits, labels, reduction=None):
        vocab_size = logits.shape[-1]
        labels = (labels[..., None] == jnp.arange(vocab_size)[None]).astype("f4")
        logits = jax.nn.log_softmax(logits, axis=-1)
        loss = -jnp.sum(labels * logits, axis=-1)
        if reduction is not None:
            loss = reduction(loss)
        return loss

    cross_entropy = partial(cross_entropy, reduction=jnp.mean)
    start_loss = cross_entropy(start_logits, start_labels)
    end_loss = cross_entropy(end_logits, end_labels)
    pooled_loss = cross_entropy(pooled_logits, pooler_labels)
    return (start_loss + end_loss + pooled_loss) / 3
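# Worked example (illustrative numbers): each head is scored with mean-reduced
# softmax cross-entropy, so start/end/pooled losses of 1.2, 0.9 and 0.3 give an
# overall loss of (1.2 + 0.9 + 0.3) / 3 = 0.8.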
@dataclass
class Args:
    model_id: str = "google/bigbird-roberta-base"
    logging_steps: int = 3000
    save_steps: int = 10500

    block_size: int = 128
    num_random_blocks: int = 3

    batch_size_per_device: int = 1
    max_epochs: int = 5

    # tx_args
    lr: float = 3e-5
    init_lr: float = 0.0
    warmup_steps: int = 20000
    weight_decay: float = 0.0095

    save_dir: str = "bigbird-roberta-natural-questions"
    base_dir: str = "training-expt"
    tr_data_path: str = "data/nq-training.jsonl"
    val_data_path: str = "data/nq-validation.jsonl"

    def __post_init__(self):
        os.makedirs(self.base_dir, exist_ok=True)
        self.save_dir = os.path.join(self.base_dir, self.save_dir)
        self.batch_size = self.batch_size_per_device * jax.device_count()


@dataclass
class DataCollator:
    pad_id: int
    max_length: int = 4096  # no dynamic padding on TPUs

    def __call__(self, batch):
        batch = self.collate_fn(batch)
        batch = jax.tree_util.tree_map(shard, batch)
        return batch

    def collate_fn(self, features):
        input_ids, attention_mask = self.fetch_inputs(features["input_ids"])
        batch = {
            "input_ids": jnp.array(input_ids, dtype=jnp.int32),
            "attention_mask": jnp.array(attention_mask, dtype=jnp.int32),
            "start_labels": jnp.array(features["start_token"], dtype=jnp.int32),
            "end_labels": jnp.array(features["end_token"], dtype=jnp.int32),
            "pooled_labels": jnp.array(features["category"], dtype=jnp.int32),
        }
        return batch

    def fetch_inputs(self, input_ids: list):
        inputs = [self._fetch_inputs(ids) for ids in input_ids]
        return zip(*inputs)

    def _fetch_inputs(self, input_ids: list):
        attention_mask = [1 for _ in range(len(input_ids))]
        while len(input_ids) < self.max_length:
            input_ids.append(self.pad_id)
            attention_mask.append(0)
        return input_ids, attention_mask
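# Illustrative padding (hypothetical values): with pad_id=0 and max_length=6,
# _fetch_inputs([5, 7, 9]) returns ([5, 7, 9, 0, 0, 0], [1, 1, 1, 0, 0, 0]).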
def get_batched_dataset(dataset, batch_size, seed=None):
    if seed is not None:
        dataset = dataset.shuffle(seed=seed)
    for i in range(len(dataset) // batch_size):
        batch = dataset[i * batch_size : (i + 1) * batch_size]
        yield dict(batch)


@partial(jax.pmap, axis_name="batch")
def train_step(state, drp_rng, **model_inputs):
    def loss_fn(params):
        start_labels = model_inputs.pop("start_labels")
        end_labels = model_inputs.pop("end_labels")
        pooled_labels = model_inputs.pop("pooled_labels")

        outputs = state.apply_fn(**model_inputs, params=params, dropout_rng=drp_rng, train=True)
        start_logits, end_logits, pooled_logits = outputs

        return state.loss_fn(
            start_logits,
            start_labels,
            end_logits,
            end_labels,
            pooled_logits,
            pooled_labels,
        )

    drp_rng, new_drp_rng = jax.random.split(drp_rng)
    grad_fn = jax.value_and_grad(loss_fn)
    loss, grads = grad_fn(state.params)
    metrics = jax.lax.pmean({"loss": loss}, axis_name="batch")
    grads = jax.lax.pmean(grads, "batch")

    state = state.apply_gradients(grads=grads)
    return state, metrics, new_drp_rng


@partial(jax.pmap, axis_name="batch")
def val_step(state, **model_inputs):
    start_labels = model_inputs.pop("start_labels")
    end_labels = model_inputs.pop("end_labels")
    pooled_labels = model_inputs.pop("pooled_labels")

    outputs = state.apply_fn(**model_inputs, params=state.params, train=False)
    start_logits, end_logits, pooled_logits = outputs

    loss = state.loss_fn(start_logits, start_labels, end_logits, end_labels, pooled_logits, pooled_labels)
    metrics = jax.lax.pmean({"loss": loss}, axis_name="batch")
    return metrics


class TrainState(train_state.TrainState):
    loss_fn: Callable = struct.field(pytree_node=False)
@dataclass
class Trainer:
    args: Args
    data_collator: Callable
    train_step_fn: Callable
    val_step_fn: Callable
    model_save_fn: Callable
    logger: wandb
    scheduler_fn: Callable = None

    def create_state(self, model, tx, num_train_steps, ckpt_dir=None):
        params = model.params
        state = TrainState.create(
            apply_fn=model.__call__,
            params=params,
            tx=tx,
            loss_fn=calculate_loss_for_nq,
        )
        if ckpt_dir is not None:
            params, opt_state, step, args, data_collator = restore_checkpoint(ckpt_dir, state)
            tx_args = {
                "lr": args.lr,
                "init_lr": args.init_lr,
                "warmup_steps": args.warmup_steps,
                "num_train_steps": num_train_steps,
                "weight_decay": args.weight_decay,
            }
            tx, lr = build_tx(**tx_args)
            state = train_state.TrainState(
                step=step,
                apply_fn=model.__call__,
                params=params,
                tx=tx,
                opt_state=opt_state,
            )
            self.args = args
            self.data_collator = data_collator
            self.scheduler_fn = lr
            model.params = params
        state = jax_utils.replicate(state)
        return state

    def train(self, state, tr_dataset, val_dataset):
        args = self.args
        total = len(tr_dataset) // args.batch_size

        rng = jax.random.PRNGKey(0)
        drp_rng = jax.random.split(rng, jax.device_count())
        for epoch in range(args.max_epochs):
            running_loss = jnp.array(0, dtype=jnp.float32)
            tr_dataloader = get_batched_dataset(tr_dataset, args.batch_size, seed=epoch)
            i = 0
            for batch in tqdm(tr_dataloader, total=total, desc=f"Running EPOCH-{epoch}"):
                batch = self.data_collator(batch)
                state, metrics, drp_rng = self.train_step_fn(state, drp_rng, **batch)
                running_loss += jax_utils.unreplicate(metrics["loss"])
                i += 1
                if i % args.logging_steps == 0:
                    state_step = jax_utils.unreplicate(state.step)
                    tr_loss = running_loss.item() / i
                    lr = self.scheduler_fn(state_step - 1)

                    eval_loss = self.evaluate(state, val_dataset)
                    logging_dict = {
                        "step": state_step.item(),
                        "eval_loss": eval_loss.item(),
                        "tr_loss": tr_loss,
                        "lr": lr.item(),
                    }
                    tqdm.write(str(logging_dict))
                    self.logger.log(logging_dict, commit=True)

                if i % args.save_steps == 0:
                    self.save_checkpoint(args.save_dir + f"-e{epoch}-s{i}", state=state)

    def evaluate(self, state, dataset):
        dataloader = get_batched_dataset(dataset, self.args.batch_size)
        total = len(dataset) // self.args.batch_size
        running_loss = jnp.array(0, dtype=jnp.float32)
        i = 0
        for batch in tqdm(dataloader, total=total, desc="Evaluating ... "):
            batch = self.data_collator(batch)
            metrics = self.val_step_fn(state, **batch)
            running_loss += jax_utils.unreplicate(metrics["loss"])
            i += 1
        return running_loss / i

    def save_checkpoint(self, save_dir, state):
        state = jax_utils.unreplicate(state)
        print(f"SAVING CHECKPOINT IN {save_dir}", end=" ... ")
        self.model_save_fn(save_dir, params=state.params)
        with open(os.path.join(save_dir, "opt_state.msgpack"), "wb") as f:
            f.write(to_bytes(state.opt_state))
        joblib.dump(self.args, os.path.join(save_dir, "args.joblib"))
        joblib.dump(self.data_collator, os.path.join(save_dir, "data_collator.joblib"))
        with open(os.path.join(save_dir, "training_state.json"), "w") as f:
            json.dump({"step": state.step.item()}, f)
        print("DONE")
def restore_checkpoint(save_dir, state):
    print(f"RESTORING CHECKPOINT FROM {save_dir}", end=" ... ")
    with open(os.path.join(save_dir, "flax_model.msgpack"), "rb") as f:
        params = from_bytes(state.params, f.read())

    with open(os.path.join(save_dir, "opt_state.msgpack"), "rb") as f:
        opt_state = from_bytes(state.opt_state, f.read())

    args = joblib.load(os.path.join(save_dir, "args.joblib"))
    data_collator = joblib.load(os.path.join(save_dir, "data_collator.joblib"))

    with open(os.path.join(save_dir, "training_state.json"), "r") as f:
        training_state = json.load(f)
    step = training_state["step"]

    print("DONE")
    return params, opt_state, step, args, data_collator


def scheduler_fn(lr, init_lr, warmup_steps, num_train_steps):
    decay_steps = num_train_steps - warmup_steps
    warmup_fn = optax.linear_schedule(init_value=init_lr, end_value=lr, transition_steps=warmup_steps)
    decay_fn = optax.linear_schedule(init_value=lr, end_value=1e-7, transition_steps=decay_steps)
    lr = optax.join_schedules(schedules=[warmup_fn, decay_fn], boundaries=[warmup_steps])
    return lr


def build_tx(lr, init_lr, warmup_steps, num_train_steps, weight_decay):
    def weight_decay_mask(params):
        params = traverse_util.flatten_dict(params)
        # mask out bias and LayerNorm scale parameters from weight decay,
        # keyed on the flattened parameter path
        mask = {k: (k[-1] != "bias" and k[-2:] != ("LayerNorm", "scale")) for k in params}
        return traverse_util.unflatten_dict(mask)

    lr = scheduler_fn(lr, init_lr, warmup_steps, num_train_steps)
    tx = optax.adamw(learning_rate=lr, weight_decay=weight_decay, mask=weight_decay_mask)
    return tx, lr
| 245
|
import re
import tempfile
from pathlib import Path
import pytest
import yaml
from datasets.utils.readme import ReadMe
# @pytest.fixture
# def example_yaml_structure():
example_yaml_structure = yaml.safe_load(
'''\
name: ""
allow_empty: false
allow_empty_text: true
subsections:
- name: "Dataset Card for X" # First-level markdown heading
allow_empty: false
allow_empty_text: true
subsections:
- name: "Table of Contents"
allow_empty: false
allow_empty_text: false
subsections: null
- name: "Dataset Description"
allow_empty: false
allow_empty_text: false
subsections:
- name: "Dataset Summary"
allow_empty: false
allow_empty_text: false
subsections: null
- name: "Supported Tasks and Leaderboards"
allow_empty: true
allow_empty_text: true
subsections: null
- name: Languages
allow_empty: false
allow_empty_text: true
subsections: null
'''
)
CORRECT_DICT = {
'''name''': '''root''',
'''text''': '''''',
'''is_empty_text''': True,
'''subsections''': [
{
'''name''': '''Dataset Card for My Dataset''',
'''text''': '''''',
'''is_empty_text''': True,
'''subsections''': [
{'''name''': '''Table of Contents''', '''text''': '''Some text here.''', '''is_empty_text''': False, '''subsections''': []},
{
'''name''': '''Dataset Description''',
'''text''': '''Some text here.''',
'''is_empty_text''': False,
'''subsections''': [
{
'''name''': '''Dataset Summary''',
'''text''': '''Some text here.''',
'''is_empty_text''': False,
'''subsections''': [],
},
{
'''name''': '''Supported Tasks and Leaderboards''',
'''text''': '''''',
'''is_empty_text''': True,
'''subsections''': [],
},
{'''name''': '''Languages''', '''text''': '''Language Text''', '''is_empty_text''': False, '''subsections''': []},
],
},
],
}
],
}
README_CORRECT = '''\
---
language:
- zh
- en
---
# Dataset Card for My Dataset
## Table of Contents
Some text here.
## Dataset Description
Some text here.
### Dataset Summary
Some text here.
### Supported Tasks and Leaderboards
### Languages
Language Text
'''
README_CORRECT_FOUR_LEVEL = '''\
---
language:
- zh
- en
---
# Dataset Card for My Dataset
## Table of Contents
Some text here.
## Dataset Description
Some text here.
### Dataset Summary
Some text here.
#### Extra Ignored Subsection
### Supported Tasks and Leaderboards
### Languages
Language Text
'''
CORRECT_DICT_FOUR_LEVEL = {
'''name''': '''root''',
'''text''': '''''',
'''is_empty_text''': True,
'''subsections''': [
{
'''name''': '''Dataset Card for My Dataset''',
'''text''': '''''',
'''is_empty_text''': True,
'''subsections''': [
{'''name''': '''Table of Contents''', '''text''': '''Some text here.''', '''is_empty_text''': False, '''subsections''': []},
{
'''name''': '''Dataset Description''',
'''text''': '''Some text here.''',
'''is_empty_text''': False,
'''subsections''': [
{
'''name''': '''Dataset Summary''',
'''text''': '''Some text here.''',
'''is_empty_text''': False,
'''subsections''': [
{
'''name''': '''Extra Ignored Subsection''',
'''text''': '''''',
'''is_empty_text''': True,
'''subsections''': [],
}
],
},
{
'''name''': '''Supported Tasks and Leaderboards''',
'''text''': '''''',
'''is_empty_text''': True,
'''subsections''': [],
},
{'''name''': '''Languages''', '''text''': '''Language Text''', '''is_empty_text''': False, '''subsections''': []},
],
},
],
}
],
}
README_EMPTY_YAML = '''\
---
---
# Dataset Card for My Dataset
## Table of Contents
Some text here.
## Dataset Description
Some text here.
### Dataset Summary
Some text here.
### Supported Tasks and Leaderboards
### Languages
Language Text
'''
EXPECTED_ERROR_README_EMPTY_YAML = (
'''The following issues were found for the README at `{path}`:\n-\tEmpty YAML markers are present in the README.'''
)
README_NO_YAML = '''\
# Dataset Card for My Dataset
## Table of Contents
Some text here.
## Dataset Description
Some text here.
### Dataset Summary
Some text here.
### Supported Tasks and Leaderboards
### Languages
Language Text
'''
EXPECTED_ERROR_README_NO_YAML = (
'''The following issues were found for the README at `{path}`:\n-\tNo YAML markers are present in the README.'''
)
README_INCORRECT_YAML = '''\
---
# Dataset Card for My Dataset
## Table of Contents
Some text here.
## Dataset Description
Some text here.
### Dataset Summary
Some text here.
### Supported Tasks and Leaderboards
### Languages
Language Text
'''
EXPECTED_ERROR_README_INCORRECT_YAML = '''The following issues were found for the README at `{path}`:\n-\tOnly the start of YAML tags present in the README.'''
README_MISSING_TEXT = '''\
---
language:
- zh
- en
---
# Dataset Card for My Dataset
## Table of Contents
Some text here.
## Dataset Description
Some text here.
### Dataset Summary
### Supported Tasks and Leaderboards
### Languages
Language Text
'''
EXPECTED_ERROR_README_MISSING_TEXT = '''The following issues were found for the README at `{path}`:\n-\tExpected some content in section `Dataset Summary` but it is empty.\n-\tExpected some text in section `Dataset Summary` but it is empty (text in subsections are ignored).'''
README_NONE_SUBSECTION = '''\
---
language:
- zh
- en
---
# Dataset Card for My Dataset
'''
EXPECTED_ERROR_README_NONE_SUBSECTION = '''The following issues were found for the README at `{path}`:\n-\tExpected some content in section `Dataset Card for My Dataset` but it is empty.\n-\tSection `Dataset Card for My Dataset` expected the following subsections: `Table of Contents`, `Dataset Description`. Found \'None\'.'''
README_MISSING_SUBSECTION = '''\
---
language:
- zh
- en
---
# Dataset Card for My Dataset
## Table of Contents
Some text here.
## Dataset Description
Some text here.
### Dataset Summary
Some text here.
### Languages
Language Text
'''
EXPECTED_ERROR_README_MISSING_SUBSECTION = '''The following issues were found for the README at `{path}`:\n-\tSection `Dataset Description` is missing subsection: `Supported Tasks and Leaderboards`.'''
README_MISSING_CONTENT = '''\
---
language:
- zh
- en
---
# Dataset Card for My Dataset
## Table of Contents
Some text here.
## Dataset Description
Some text here.
### Dataset Summary
Some text here.
### Supported Tasks and Leaderboards
### Languages
'''
EXPECTED_ERROR_README_MISSING_CONTENT = '''The following issues were found for the README at `{path}`:\n-\tExpected some content in section `Languages` but it is empty.'''
README_MISSING_FIRST_LEVEL = '''\
---
language:
- zh
- en
---
## Table of Contents
Some text here.
## Dataset Description
Some text here.
### Dataset Summary
Some text here.
### Supported Tasks and Leaderboards
### Languages
Language Text
'''
EXPECTED_ERROR_README_MISSING_FIRST_LEVEL = '''The following issues were found for the README at `{path}`:\n-\tThe README has no first-level headings. One heading is expected. Skipping further validation for this README.'''
README_MULTIPLE_WRONG_FIRST_LEVEL = '''\
---
language:
- zh
- en
---
# Dataset Card for My Dataset
## Table of Contents
Some text here.
## Dataset Description
Some text here.
### Dataset Summary
Some text here.
### Supported Tasks and Leaderboards
### Languages
Language Text
# Dataset Card My Dataset
'''
EXPECTED_ERROR_README_MULTIPLE_WRONG_FIRST_LEVEL = '''The following issues were found for the README at `{path}`:\n-\tThe README has several first-level headings: `Dataset Card for My Dataset`, `Dataset Card My Dataset`. Only one heading is expected. Skipping further validation for this README.'''
A_ :str = '''\
---
language:
- zh
- en
---
# Dataset Card My Dataset
## Table of Contents
Some text here.
## Dataset Description
Some text here.
### Dataset Summary
Some text here.
### Supported Tasks and Leaderboards
### Languages
Language Text
'''
EXPECTED_ERROR_README_WRONG_FIRST_LEVEL = '''The following issues were found for the README at `{path}`:\n-\tNo first-level heading starting with `Dataset Card for` found in README. Skipping further validation for this README.'''
README_EMPTY = ''''''
EXPECTED_ERROR_README_EMPTY = '''The following issues were found for the README at `{path}`:\n-\tThe README has no first-level headings. One heading is expected. Skipping further validation for this README.\n-\tNo YAML markers are present in the README.'''
README_MULTIPLE_SAME_HEADING_1 = '''\
---
language:
- zh
- en
---
# Dataset Card for My Dataset
# Dataset Card for My Dataset
## Table of Contents
Some text here.
## Dataset Description
Some text here.
### Dataset Summary
Some text here.
### Supported Tasks and Leaderboards
### Languages
Language Text
'''
EXPECTED_ERROR_README_MULTIPLE_SAME_HEADING_1 = '''The following issues were found while parsing the README at `{path}`:\n-\tMultiple sections with the same heading `Dataset Card for My Dataset` have been found. Please keep only one of these sections.'''
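# NOTE (assumption): `example_yaml_structure`, `README_CORRECT`,
# `README_CORRECT_FOUR_LEVEL`, `CORRECT_DICT` and `CORRECT_DICT_FOUR_LEVEL`,
# referenced by the parametrized tests below, are defined in the earlier part
# of this test module alongside the fixture strings above.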
@pytest.mark.parametrize(
    "readme_md, expected_dict",
    [
        (README_CORRECT, CORRECT_DICT),
        (README_CORRECT_FOUR_LEVEL, CORRECT_DICT_FOUR_LEVEL),
    ],
)
def test_readme_from_string_correct(readme_md, expected_dict):
    assert ReadMe.from_string(readme_md, example_yaml_structure).to_dict() == expected_dict
@pytest.mark.parametrize(
    "readme_md, expected_error",
    [
        (README_NO_YAML, EXPECTED_ERROR_README_NO_YAML),
        (README_EMPTY_YAML, EXPECTED_ERROR_README_EMPTY_YAML),
        (README_INCORRECT_YAML, EXPECTED_ERROR_README_INCORRECT_YAML),
        (README_EMPTY, EXPECTED_ERROR_README_EMPTY),
        (README_NONE_SUBSECTION, EXPECTED_ERROR_README_NONE_SUBSECTION),
        (README_MISSING_FIRST_LEVEL, EXPECTED_ERROR_README_MISSING_FIRST_LEVEL),
        (README_MISSING_SUBSECTION, EXPECTED_ERROR_README_MISSING_SUBSECTION),
        (README_MISSING_TEXT, EXPECTED_ERROR_README_MISSING_TEXT),
        (README_WRONG_FIRST_LEVEL, EXPECTED_ERROR_README_WRONG_FIRST_LEVEL),
        (README_MULTIPLE_WRONG_FIRST_LEVEL, EXPECTED_ERROR_README_MULTIPLE_WRONG_FIRST_LEVEL),
        (README_MISSING_CONTENT, EXPECTED_ERROR_README_MISSING_CONTENT),
    ],
)
def test_readme_from_string_validation_errors(readme_md, expected_error):
    with pytest.raises(ValueError, match=re.escape(expected_error.format(path="root"))):
        readme = ReadMe.from_string(readme_md, example_yaml_structure)
        readme.validate()
@pytest.mark.parametrize(
    "readme_md, expected_error",
    [
        (README_MULTIPLE_SAME_HEADING_1, EXPECTED_ERROR_README_MULTIPLE_SAME_HEADING_1),
    ],
)
def test_readme_from_string_parsing_errors(readme_md, expected_error):
    with pytest.raises(ValueError, match=re.escape(expected_error.format(path="root"))):
        ReadMe.from_string(readme_md, example_yaml_structure)
@pytest.mark.parametrize(
    "readme_md,",
    [
        (README_MULTIPLE_SAME_HEADING_1),
    ],
)
def test_readme_from_string_suppress_parsing_errors(readme_md):
    ReadMe.from_string(readme_md, example_yaml_structure, suppress_parsing_errors=True)
@pytest.mark.parametrize(
    "readme_md, expected_dict",
    [
        (README_CORRECT, CORRECT_DICT),
        (README_CORRECT_FOUR_LEVEL, CORRECT_DICT_FOUR_LEVEL),
    ],
)
def test_readme_from_readme_correct(readme_md, expected_dict):
    with tempfile.TemporaryDirectory() as tmp_dir:
        path = Path(tmp_dir) / "README.md"
        with open(path, "w+") as readme_file:
            readme_file.write(readme_md)
        out = ReadMe.from_readme(path, example_yaml_structure).to_dict()
        assert out["name"] == path
        assert out["text"] == ""
        assert out["is_empty_text"]
        assert out["subsections"] == expected_dict["subsections"]
@pytest.mark.parametrize(
    "readme_md, expected_error",
    [
        (README_NO_YAML, EXPECTED_ERROR_README_NO_YAML),
        (README_EMPTY_YAML, EXPECTED_ERROR_README_EMPTY_YAML),
        (README_INCORRECT_YAML, EXPECTED_ERROR_README_INCORRECT_YAML),
        (README_EMPTY, EXPECTED_ERROR_README_EMPTY),
        (README_NONE_SUBSECTION, EXPECTED_ERROR_README_NONE_SUBSECTION),
        (README_MISSING_FIRST_LEVEL, EXPECTED_ERROR_README_MISSING_FIRST_LEVEL),
        (README_MISSING_SUBSECTION, EXPECTED_ERROR_README_MISSING_SUBSECTION),
        (README_MISSING_TEXT, EXPECTED_ERROR_README_MISSING_TEXT),
        (README_WRONG_FIRST_LEVEL, EXPECTED_ERROR_README_WRONG_FIRST_LEVEL),
        (README_MULTIPLE_WRONG_FIRST_LEVEL, EXPECTED_ERROR_README_MULTIPLE_WRONG_FIRST_LEVEL),
        (README_MISSING_CONTENT, EXPECTED_ERROR_README_MISSING_CONTENT),
    ],
)
def test_readme_from_readme_error(readme_md, expected_error):
    with tempfile.TemporaryDirectory() as tmp_dir:
        path = Path(tmp_dir) / "README.md"
        with open(path, "w+") as readme_file:
            readme_file.write(readme_md)
        expected_error = expected_error.format(path=path)
        with pytest.raises(ValueError, match=re.escape(expected_error)):
            readme = ReadMe.from_readme(path, example_yaml_structure)
            readme.validate()
@pytest.mark.parametrize(
    "readme_md, expected_error",
    [
        (README_MULTIPLE_SAME_HEADING_1, EXPECTED_ERROR_README_MULTIPLE_SAME_HEADING_1),
    ],
)
def test_readme_from_readme_parsing_errors(readme_md, expected_error):
    with tempfile.TemporaryDirectory() as tmp_dir:
        path = Path(tmp_dir) / "README.md"
        with open(path, "w+") as readme_file:
            readme_file.write(readme_md)
        expected_error = expected_error.format(path=path)
        with pytest.raises(ValueError, match=re.escape(expected_error)):
            ReadMe.from_readme(path, example_yaml_structure)
@pytest.mark.parametrize(
    "readme_md,",
    [
        (README_MULTIPLE_SAME_HEADING_1),
    ],
)
def test_readme_from_readme_suppress_parsing_errors(readme_md):
    with tempfile.TemporaryDirectory() as tmp_dir:
        path = Path(tmp_dir) / "README.md"
        with open(path, "w+") as readme_file:
            readme_file.write(readme_md)
        ReadMe.from_readme(path, example_yaml_structure, suppress_parsing_errors=True)
| 245
| 1
|
def get_set_bits_count(number: int) -> int:
    """Count the number of set bits (1s) in a non-negative integer."""
    if not isinstance(number, int) or number < 0:
        raise ValueError('Input must be a non-negative integer')
    count = 0
    while number:
        # `number &= number - 1` clears the lowest set bit, so we jump straight
        # to the next 1 instead of checking every bit position: the loop runs
        # once per set bit rather than 32 times.
        number &= number - 1
        count += 1
    return count
if __name__ == "__main__":
import doctest
doctest.testmod()
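    # Illustrative check (assumption: not in the original file). Kernighan's
    # trick runs one loop iteration per set bit, so 25 = 0b11001 and
    # 37 = 0b100101 each report three set bits, while 0 reports none.
    assert get_set_bits_count(25) == 3
    assert get_set_bits_count(37) == 3
    assert get_set_bits_count(0) == 0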
| 7
|
import importlib
import os
import fsspec
import pytest
from fsspec import register_implementation
from fsspec.registry import _registry as _fsspec_registry
from datasets.filesystems import COMPRESSION_FILESYSTEMS, HfFileSystem, extract_path_from_uri, is_remote_filesystem
from .utils import require_lz4, require_zstandard
def test_mockfs(mockfs):
    assert "mock" in _fsspec_registry
    assert "bz2" in _fsspec_registry
def test_non_mockfs():
    assert "mock" not in _fsspec_registry
    assert "bz2" in _fsspec_registry
def test_extract_path_from_uri():
    mock_bucket = "mock-s3-bucket"
    dataset_path = f"s3://{mock_bucket}"
    dataset_path = extract_path_from_uri(dataset_path)
    assert dataset_path.startswith("s3://") is False
    dataset_path = "./local/path"
    new_dataset_path = extract_path_from_uri(dataset_path)
    assert dataset_path == new_dataset_path
def test_is_remote_filesystem(mockfs):
    is_remote = is_remote_filesystem(mockfs)
    assert is_remote is True
    fs = fsspec.filesystem("file")
    is_remote = is_remote_filesystem(fs)
    assert is_remote is False
@pytest.mark.parametrize("compression_fs_class", COMPRESSION_FILESYSTEMS)
def test_compression_filesystems(compression_fs_class, gz_file, bz2_file, lz4_file, zstd_file, xz_file, text_file):
    input_paths = {"gzip": gz_file, "xz": xz_file, "zstd": zstd_file, "bz2": bz2_file, "lz4": lz4_file}
    input_path = input_paths[compression_fs_class.protocol]
    if input_path is None:
        reason = f"for '{compression_fs_class.protocol}' compression protocol, "
        if compression_fs_class.protocol == "lz4":
            reason += require_lz4.kwargs["reason"]
        elif compression_fs_class.protocol == "zstd":
            reason += require_zstandard.kwargs["reason"]
        pytest.skip(reason)
    fs = fsspec.filesystem(compression_fs_class.protocol, fo=input_path)
    assert isinstance(fs, compression_fs_class)
    expected_filename = os.path.basename(input_path)
    expected_filename = expected_filename[: expected_filename.rindex(".")]
    assert fs.glob("*") == [expected_filename]
    with fs.open(expected_filename, "r", encoding="utf-8") as f, open(text_file, encoding="utf-8") as expected_file:
        assert f.read() == expected_file.read()
@pytest.mark.parametrize("protocol", ["zip", "gzip"])
def test_fs_isfile(protocol, zip_jsonl_path, jsonl_gz_path):
    compressed_file_paths = {"zip": zip_jsonl_path, "gzip": jsonl_gz_path}
    compressed_file_path = compressed_file_paths[protocol]
    member_file_path = "dataset.jsonl"
    path = f"{protocol}://{member_file_path}::{compressed_file_path}"
    fs, *_ = fsspec.get_fs_token_paths(path)
    assert fs.isfile(member_file_path)
    assert not fs.isfile("non_existing_" + member_file_path)
@pytest.mark.integration
def test_hf_filesystem(hf_token, hf_api, hf_private_dataset_repo_txt_data, text_file):
    repo_info = hf_api.dataset_info(hf_private_dataset_repo_txt_data, token=hf_token)
    hffs = HfFileSystem(repo_info=repo_info, token=hf_token)
    assert sorted(hffs.glob("*")) == [".gitattributes", "data"]
    assert hffs.isdir("data")
    assert hffs.isfile(".gitattributes") and hffs.isfile("data/text_data.txt")
    with open(text_file) as f:
        assert hffs.open("data/text_data.txt", "r").read() == f.read()
def test_fs_overwrites():
    protocol = "bz2"
    # Import module
    import datasets.filesystems
    # Overwrite protocol and reload
    register_implementation(protocol, None, clobber=True)
    with pytest.warns(UserWarning) as warning_info:
        importlib.reload(datasets.filesystems)
    assert len(warning_info) == 1
    assert (
        str(warning_info[0].message)
        == f"A filesystem protocol was already set for {protocol} and will be overwritten."
    )
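# Minimal standalone sketch (assumption: not part of the original test module)
# of the fsspec URL-chaining exercised by `test_fs_isfile` above. With
# `datasets` imported so its compression filesystems are registered,
# "gzip://member::/path/archive.gz" means "open `member` through the gzip
# filesystem layered over the local file":
#
#   import datasets  # registers the "gzip", "zip", ... protocols
#   import fsspec
#   with fsspec.open("gzip://data.txt::/tmp/data.txt.gz", "rt") as f:
#       print(f.read())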
| 308
| 0
|
'''simple docstring'''
import logging
import os
from dataclasses import dataclass
from enum import Enum
from typing import List, Optional, Union
from filelock import FileLock
from transformers import PreTrainedTokenizer, is_tf_available, is_torch_available
logger = logging.getLogger(__name__)
@dataclass
class InputExample:
    """A single training/test example for token classification."""
    guid: str
    words: List[str]
    labels: Optional[List[str]]
@dataclass
class InputFeatures:
    """A single set of features of data to be fed into a model."""
    input_ids: List[int]
    attention_mask: List[int]
    token_type_ids: Optional[List[int]] = None
    label_ids: Optional[List[int]] = None
class Split(Enum):
    train = "train"
    dev = "dev"
    test = "test"
class TokenClassificationTask:
    @staticmethod
    def read_examples_from_file(data_dir, mode: Union[Split, str]) -> List[InputExample]:
        raise NotImplementedError
    @staticmethod
    def get_labels(path: str) -> List[str]:
        raise NotImplementedError
    @staticmethod
    def convert_examples_to_features(
        examples: List[InputExample],
        label_list: List[str],
        max_seq_length: int,
        tokenizer: PreTrainedTokenizer,
        cls_token_at_end=False,
        cls_token="[CLS]",
        cls_token_segment_id=1,
        sep_token="[SEP]",
        sep_token_extra=False,
        pad_on_left=False,
        pad_token=0,
        pad_token_segment_id=0,
        pad_token_label_id=-100,
        sequence_a_segment_id=0,
        mask_padding_with_zero=True,
    ) -> List[InputFeatures]:
        """Converts a list of `InputExample`s into model-ready `InputFeatures`."""
        label_map = {label: i for i, label in enumerate(label_list)}
        features = []
        for ex_index, example in enumerate(examples):
            if ex_index % 10_000 == 0:
                logger.info("Writing example %d of %d", ex_index, len(examples))
            tokens = []
            label_ids = []
            for word, label in zip(example.words, example.labels):
                word_tokens = tokenizer.tokenize(word)
                # bert-base-multilingual-cased sometimes outputs nothing ([]) when calling tokenize with just a space.
                if len(word_tokens) > 0:
                    tokens.extend(word_tokens)
                    # Use the real label id for the first token of the word, and padding ids for the remaining tokens
                    label_ids.extend([label_map[label]] + [pad_token_label_id] * (len(word_tokens) - 1))
            # Account for [CLS] and [SEP] with "- 2" and with "- 3" for RoBERTa.
            special_tokens_count = tokenizer.num_special_tokens_to_add()
            if len(tokens) > max_seq_length - special_tokens_count:
                tokens = tokens[: (max_seq_length - special_tokens_count)]
                label_ids = label_ids[: (max_seq_length - special_tokens_count)]
            # The convention in BERT is:
            # (a) For sequence pairs:
            #  tokens:   [CLS] is this jack ##son ##ville ? [SEP] no it is not . [SEP]
            #  type_ids:   0   0  0    0    0     0       0   0   1  1  1  1   1   1
            # (b) For single sequences:
            #  tokens:   [CLS] the dog is hairy . [SEP]
            #  type_ids:   0   0   0   0  0     0   0
            #
            # Where "type_ids" are used to indicate whether this is the first
            # sequence or the second sequence. The embedding vectors for `type=0` and
            # `type=1` were learned during pre-training and are added to the wordpiece
            # embedding vector (and position vector). This is not *strictly* necessary
            # since the [SEP] token unambiguously separates the sequences, but it makes
            # it easier for the model to learn the concept of sequences.
            #
            # For classification tasks, the first vector (corresponding to [CLS]) is
            # used as the "sentence vector". Note that this only makes sense because
            # the entire model is fine-tuned.
            tokens += [sep_token]
            label_ids += [pad_token_label_id]
            if sep_token_extra:
                # roberta uses an extra separator b/w pairs of sentences
                tokens += [sep_token]
                label_ids += [pad_token_label_id]
            segment_ids = [sequence_a_segment_id] * len(tokens)
            if cls_token_at_end:
                tokens += [cls_token]
                label_ids += [pad_token_label_id]
                segment_ids += [cls_token_segment_id]
            else:
                tokens = [cls_token] + tokens
                label_ids = [pad_token_label_id] + label_ids
                segment_ids = [cls_token_segment_id] + segment_ids
            input_ids = tokenizer.convert_tokens_to_ids(tokens)
            # The mask has 1 for real tokens and 0 for padding tokens. Only real
            # tokens are attended to.
            input_mask = [1 if mask_padding_with_zero else 0] * len(input_ids)
            # Zero-pad up to the sequence length.
            padding_length = max_seq_length - len(input_ids)
            if pad_on_left:
                input_ids = ([pad_token] * padding_length) + input_ids
                input_mask = ([0 if mask_padding_with_zero else 1] * padding_length) + input_mask
                segment_ids = ([pad_token_segment_id] * padding_length) + segment_ids
                label_ids = ([pad_token_label_id] * padding_length) + label_ids
            else:
                input_ids += [pad_token] * padding_length
                input_mask += [0 if mask_padding_with_zero else 1] * padding_length
                segment_ids += [pad_token_segment_id] * padding_length
                label_ids += [pad_token_label_id] * padding_length
            assert len(input_ids) == max_seq_length
            assert len(input_mask) == max_seq_length
            assert len(segment_ids) == max_seq_length
            assert len(label_ids) == max_seq_length
            if ex_index < 5:
                logger.info("*** Example ***")
                logger.info("guid: %s", example.guid)
                logger.info("tokens: %s", " ".join([str(x) for x in tokens]))
                logger.info("input_ids: %s", " ".join([str(x) for x in input_ids]))
                logger.info("input_mask: %s", " ".join([str(x) for x in input_mask]))
                logger.info("segment_ids: %s", " ".join([str(x) for x in segment_ids]))
                logger.info("label_ids: %s", " ".join([str(x) for x in label_ids]))
            if "token_type_ids" not in tokenizer.model_input_names:
                segment_ids = None
            features.append(
                InputFeatures(
                    input_ids=input_ids, attention_mask=input_mask, token_type_ids=segment_ids, label_ids=label_ids
                )
            )
        return features
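# Worked illustration (assumption, not from the original file) of the
# first-sub-token labelling rule above: with pad_token_label_id = -100, a word
# "Washington" labelled B-LOC that tokenizes to ["Wash", "##ing", "##ton"]
# contributes
#     tokens    : "Wash", "##ing", "##ton"
#     label_ids : label_map["B-LOC"], -100, -100
# so the loss later ignores all but the first sub-token of each word.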
if is_torch_available():
    import torch
    from torch import nn
    from torch.utils.data import Dataset
    class TokenClassificationDataset(Dataset):
        features: List[InputFeatures]
        # Use cross entropy ignore_index as padding label id so that only
        # real label ids contribute to the loss later.
        pad_token_label_id: int = nn.CrossEntropyLoss().ignore_index
        def __init__(
            self,
            token_classification_task: TokenClassificationTask,
            data_dir: str,
            tokenizer: PreTrainedTokenizer,
            labels: List[str],
            model_type: str,
            max_seq_length: Optional[int] = None,
            overwrite_cache=False,
            mode: Split = Split.train,
        ):
            # Load data features from cache or dataset file
            cached_features_file = os.path.join(
                data_dir,
                "cached_{}_{}_{}".format(mode.value, tokenizer.__class__.__name__, str(max_seq_length)),
            )
            # Make sure only the first process in distributed training processes the dataset,
            # and the others will use the cache.
            lock_path = cached_features_file + ".lock"
            with FileLock(lock_path):
                if os.path.exists(cached_features_file) and not overwrite_cache:
                    logger.info(f"Loading features from cached file {cached_features_file}")
                    self.features = torch.load(cached_features_file)
                else:
                    logger.info(f"Creating features from dataset file at {data_dir}")
                    examples = token_classification_task.read_examples_from_file(data_dir, mode)
                    # TODO clean up all this to leverage built-in features of tokenizers
                    self.features = token_classification_task.convert_examples_to_features(
                        examples,
                        labels,
                        max_seq_length,
                        tokenizer,
                        cls_token_at_end=bool(model_type in ["xlnet"]),
                        cls_token=tokenizer.cls_token,
                        cls_token_segment_id=2 if model_type in ["xlnet"] else 0,
                        sep_token=tokenizer.sep_token,
                        sep_token_extra=False,
                        pad_on_left=bool(tokenizer.padding_side == "left"),
                        pad_token=tokenizer.pad_token_id,
                        pad_token_segment_id=tokenizer.pad_token_type_id,
                        pad_token_label_id=self.pad_token_label_id,
                    )
                    logger.info(f"Saving features into cached file {cached_features_file}")
                    torch.save(self.features, cached_features_file)
        def __len__(self):
            return len(self.features)
        def __getitem__(self, i) -> InputFeatures:
            return self.features[i]
if is_tf_available():
    import tensorflow as tf
    class TFTokenClassificationDataset:
        features: List[InputFeatures]
        pad_token_label_id: int = -100
        def __init__(
            self,
            token_classification_task: TokenClassificationTask,
            data_dir: str,
            tokenizer: PreTrainedTokenizer,
            labels: List[str],
            model_type: str,
            max_seq_length: Optional[int] = None,
            overwrite_cache=False,
            mode: Split = Split.train,
        ):
            examples = token_classification_task.read_examples_from_file(data_dir, mode)
            # TODO clean up all this to leverage built-in features of tokenizers
            self.features = token_classification_task.convert_examples_to_features(
                examples,
                labels,
                max_seq_length,
                tokenizer,
                cls_token_at_end=bool(model_type in ["xlnet"]),
                cls_token=tokenizer.cls_token,
                cls_token_segment_id=2 if model_type in ["xlnet"] else 0,
                sep_token=tokenizer.sep_token,
                sep_token_extra=False,
                pad_on_left=bool(tokenizer.padding_side == "left"),
                pad_token=tokenizer.pad_token_id,
                pad_token_segment_id=tokenizer.pad_token_type_id,
                pad_token_label_id=self.pad_token_label_id,
            )
            def gen():
                for ex in self.features:
                    if ex.token_type_ids is None:
                        yield (
                            {"input_ids": ex.input_ids, "attention_mask": ex.attention_mask},
                            ex.label_ids,
                        )
                    else:
                        yield (
                            {
                                "input_ids": ex.input_ids,
                                "attention_mask": ex.attention_mask,
                                "token_type_ids": ex.token_type_ids,
                            },
                            ex.label_ids,
                        )
            if "token_type_ids" not in tokenizer.model_input_names:
                self.dataset = tf.data.Dataset.from_generator(
                    gen,
                    ({"input_ids": tf.int32, "attention_mask": tf.int32}, tf.int64),
                    (
                        {"input_ids": tf.TensorShape([None]), "attention_mask": tf.TensorShape([None])},
                        tf.TensorShape([None]),
                    ),
                )
            else:
                self.dataset = tf.data.Dataset.from_generator(
                    gen,
                    ({"input_ids": tf.int32, "attention_mask": tf.int32, "token_type_ids": tf.int32}, tf.int64),
                    (
                        {
                            "input_ids": tf.TensorShape([None]),
                            "attention_mask": tf.TensorShape([None]),
                            "token_type_ids": tf.TensorShape([None]),
                        },
                        tf.TensorShape([None]),
                    ),
                )
        def get_dataset(self):
            self.dataset = self.dataset.apply(tf.data.experimental.assert_cardinality(len(self.features)))
            return self.dataset
        def __len__(self):
            return len(self.features)
        def __getitem__(self, i) -> InputFeatures:
            return self.features[i]
| 353
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {
"configuration_conditional_detr": [
"CONDITIONAL_DETR_PRETRAINED_CONFIG_ARCHIVE_MAP",
"ConditionalDetrConfig",
"ConditionalDetrOnnxConfig",
]
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCamelCase = ["ConditionalDetrFeatureExtractor"]
__UpperCamelCase = ["ConditionalDetrImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_conditional_detr"] = [
"CONDITIONAL_DETR_PRETRAINED_MODEL_ARCHIVE_LIST",
"ConditionalDetrForObjectDetection",
"ConditionalDetrForSegmentation",
"ConditionalDetrModel",
"ConditionalDetrPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_conditional_detr import (
CONDITIONAL_DETR_PRETRAINED_CONFIG_ARCHIVE_MAP,
ConditionalDetrConfig,
ConditionalDetrOnnxConfig,
)
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_conditional_detr import ConditionalDetrFeatureExtractor
from .image_processing_conditional_detr import ConditionalDetrImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_conditional_detr import (
CONDITIONAL_DETR_PRETRAINED_MODEL_ARCHIVE_LIST,
ConditionalDetrForObjectDetection,
ConditionalDetrForSegmentation,
ConditionalDetrModel,
ConditionalDetrPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 13
| 0
|
from __future__ import annotations
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import numpy as np
import tensorflow as tf
from transformers import (
TF_FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
FlaubertConfig,
TFFlaubertForMultipleChoice,
TFFlaubertForQuestionAnsweringSimple,
TFFlaubertForSequenceClassification,
TFFlaubertForTokenClassification,
TFFlaubertModel,
TFFlaubertWithLMHeadModel,
)
class TFFlaubertModelTester:
    def __init__(
        self,
        parent,
    ):
        self.parent = parent
        self.batch_size = 13
        self.seq_length = 7
        self.is_training = True
        self.use_input_lengths = True
        self.use_token_type_ids = True
        self.use_labels = True
        self.gelu_activation = True
        self.sinusoidal_embeddings = False
        self.causal = False
        self.asm = False
        self.n_langs = 2
        self.vocab_size = 99
        self.n_special = 0
        self.hidden_size = 32
        self.num_hidden_layers = 2
        self.num_attention_heads = 4
        self.hidden_dropout_prob = 0.1
        self.attention_probs_dropout_prob = 0.1
        self.max_position_embeddings = 512
        self.type_vocab_size = 16
        self.type_sequence_label_size = 2
        self.initializer_range = 0.02
        self.num_labels = 3
        self.num_choices = 4
        self.summary_type = "last"
        self.use_proj = True
        self.scope = None
        self.bos_token_id = 0
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        input_mask = random_attention_mask([self.batch_size, self.seq_length], dtype=tf.float32)
        input_lengths = None
        if self.use_input_lengths:
            input_lengths = (
                ids_tensor([self.batch_size], vocab_size=2) + self.seq_length - 2
            )  # small variation of seq_length
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.n_langs)
        sequence_labels = None
        token_labels = None
        is_impossible_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            is_impossible_labels = ids_tensor([self.batch_size], 2, dtype=tf.float32)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)
        config = FlaubertConfig(
            vocab_size=self.vocab_size,
            n_special=self.n_special,
            emb_dim=self.hidden_size,
            n_layers=self.num_hidden_layers,
            n_heads=self.num_attention_heads,
            dropout=self.hidden_dropout_prob,
            attention_dropout=self.attention_probs_dropout_prob,
            gelu_activation=self.gelu_activation,
            sinusoidal_embeddings=self.sinusoidal_embeddings,
            asm=self.asm,
            causal=self.causal,
            n_langs=self.n_langs,
            max_position_embeddings=self.max_position_embeddings,
            initializer_range=self.initializer_range,
            summary_type=self.summary_type,
            use_proj=self.use_proj,
            bos_token_id=self.bos_token_id,
        )
        return (
            config,
            input_ids,
            token_type_ids,
            input_lengths,
            sequence_labels,
            token_labels,
            is_impossible_labels,
            choice_labels,
            input_mask,
        )
    def create_and_check_flaubert_model(
        self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask
    ):
        model = TFFlaubertModel(config=config)
        inputs = {"input_ids": input_ids, "lengths": input_lengths, "langs": token_type_ids}
        result = model(inputs)
        inputs = [input_ids, input_mask]
        result = model(inputs)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
    def create_and_check_flaubert_lm_head(
        self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask
    ):
        model = TFFlaubertWithLMHeadModel(config)
        inputs = {"input_ids": input_ids, "lengths": input_lengths, "langs": token_type_ids}
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
    def create_and_check_flaubert_qa(
        self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask
    ):
        model = TFFlaubertForQuestionAnsweringSimple(config)
        inputs = {"input_ids": input_ids, "lengths": input_lengths}
        result = model(inputs)
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))
    def create_and_check_flaubert_sequence_classif(
        self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask
    ):
        model = TFFlaubertForSequenceClassification(config)
        inputs = {"input_ids": input_ids, "lengths": input_lengths}
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))
    def create_and_check_flaubert_for_token_classification(
        self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask
    ):
        config.num_labels = self.num_labels
        model = TFFlaubertForTokenClassification(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))
    def create_and_check_flaubert_for_multiple_choice(
        self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask
    ):
        config.num_choices = self.num_choices
        model = TFFlaubertForMultipleChoice(config=config)
        multiple_choice_inputs_ids = tf.tile(tf.expand_dims(input_ids, 1), (1, self.num_choices, 1))
        multiple_choice_input_mask = tf.tile(tf.expand_dims(input_mask, 1), (1, self.num_choices, 1))
        multiple_choice_token_type_ids = tf.tile(tf.expand_dims(token_type_ids, 1), (1, self.num_choices, 1))
        inputs = {
            "input_ids": multiple_choice_inputs_ids,
            "attention_mask": multiple_choice_input_mask,
            "token_type_ids": multiple_choice_token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_lengths,
            sequence_labels,
            token_labels,
            is_impossible_labels,
            choice_labels,
            input_mask,
        ) = config_and_inputs
        inputs_dict = {
            "input_ids": input_ids,
            "token_type_ids": token_type_ids,
            "langs": token_type_ids,
            "lengths": input_lengths,
        }
        return config, inputs_dict
@require_tf
class TFFlaubertModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            TFFlaubertModel,
            TFFlaubertWithLMHeadModel,
            TFFlaubertForSequenceClassification,
            TFFlaubertForQuestionAnsweringSimple,
            TFFlaubertForTokenClassification,
            TFFlaubertForMultipleChoice,
        )
        if is_tf_available()
        else ()
    )
    all_generative_model_classes = (
        (TFFlaubertWithLMHeadModel,) if is_tf_available() else ()
    )  # TODO (PVP): Check other models whether language generation is also applicable
    pipeline_model_mapping = (
        {
            "feature-extraction": TFFlaubertModel,
            "fill-mask": TFFlaubertWithLMHeadModel,
            "question-answering": TFFlaubertForQuestionAnsweringSimple,
            "text-classification": TFFlaubertForSequenceClassification,
            "token-classification": TFFlaubertForTokenClassification,
            "zero-shot": TFFlaubertForSequenceClassification,
        }
        if is_tf_available()
        else {}
    )
    test_head_masking = False
    test_onnx = False
    def is_pipeline_test_to_skip(
        self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name
    ):
        if (
            pipeline_test_casse_name == "QAPipelineTests"
            and tokenizer_name is not None
            and not tokenizer_name.endswith("Fast")
        ):
            # `QAPipelineTests` fails for a few models when the slower tokenizer are used.
            # (The slower tokenizers were never used for pipeline tests before the pipeline testing rework)
            # TODO: check (and possibly fix) the `QAPipelineTests` with slower tokenizer
            return True
        return False
    def setUp(self):
        self.model_tester = TFFlaubertModelTester(self)
        self.config_tester = ConfigTester(self, config_class=FlaubertConfig, emb_dim=37)
    def test_config(self):
        self.config_tester.run_common_tests()
    def test_flaubert_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_model(*config_and_inputs)
    def test_flaubert_lm_head(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_lm_head(*config_and_inputs)
    def test_flaubert_qa(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_qa(*config_and_inputs)
    def test_flaubert_sequence_classif(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_sequence_classif(*config_and_inputs)
    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_for_token_classification(*config_and_inputs)
    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_for_multiple_choice(*config_and_inputs)
    @slow
    def test_model_from_pretrained(self):
        for model_name in TF_FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFFlaubertModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
@require_tf
@require_sentencepiece
@require_tokenizers
class TFFlaubertModelIntegrationTest(unittest.TestCase):
    @slow
    def test_output_embeds_base_model(self):
        model = TFFlaubertModel.from_pretrained("jplu/tf-flaubert-small-cased")
        input_ids = tf.convert_to_tensor(
            [[0, 158, 735, 2592, 1424, 6727, 82, 1]],
            dtype=tf.int32,
        )  # "J'aime flaubert !"
        output = model(input_ids)[0]
        expected_shape = tf.TensorShape((1, 8, 512))
        self.assertEqual(output.shape, expected_shape)
        # compare the actual values for a slice.
        expected_slice = tf.convert_to_tensor(
            [
                [
                    [-1.8768773, -1.566555, 0.27072418],
                    [-1.6920038, -0.5873505, 1.9329599],
                    [-2.9563985, -1.6993835, 1.7972052],
                ]
            ],
            dtype=tf.float32,
        )
        self.assertTrue(np.allclose(output[:, :3, :3].numpy(), expected_slice.numpy(), atol=1e-4))
| 20
|
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from ..models.speecht5 import SpeechT5ForTextToSpeech, SpeechT5HifiGan, SpeechT5Processor
from ..utils import is_datasets_available
from .base import PipelineTool
if is_datasets_available():
from datasets import load_dataset
class TextToSpeechTool(PipelineTool):
    default_checkpoint = "microsoft/speecht5_tts"
    description = (
        "This is a tool that reads an English text out loud. It takes an input named `text` which should contain the "
        "text to read (in English) and returns a waveform object containing the sound."
    )
    name = "text_reader"
    pre_processor_class = SpeechT5Processor
    model_class = SpeechT5ForTextToSpeech
    post_processor_class = SpeechT5HifiGan
    inputs = ["text"]
    outputs = ["audio"]
    def setup(self):
        if self.post_processor is None:
            self.post_processor = "microsoft/speecht5_hifigan"
        super().setup()
    def encode(self, text, speaker_embeddings=None):
        inputs = self.pre_processor(text=text, return_tensors="pt", truncation=True)
        if speaker_embeddings is None:
            if not is_datasets_available():
                raise ImportError("Datasets needs to be installed if not passing speaker embeddings.")
            embeddings_dataset = load_dataset("Matthijs/cmu-arctic-xvectors", split="validation")
            speaker_embeddings = torch.tensor(embeddings_dataset[7305]["xvector"]).unsqueeze(0)
        return {"input_ids": inputs["input_ids"], "speaker_embeddings": speaker_embeddings}
    def forward(self, inputs):
        with torch.no_grad():
            return self.model.generate_speech(**inputs)
    def decode(self, outputs):
        with torch.no_grad():
            return self.post_processor(outputs).cpu().detach()
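# Usage sketch (assumption: not part of the original file). `PipelineTool`
# subclasses are callable and chain encode -> forward -> decode:
#
#   tool = TextToSpeechTool()
#   waveform = tool("Hello, world!")  # torch tensor with the synthesized audio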
| 20
| 1
|
"""simple docstring"""
import warnings
from ...utils import logging
from .image_processing_deformable_detr import DeformableDetrImageProcessor
logger = logging.get_logger(__name__)
class DeformableDetrFeatureExtractor(DeformableDetrImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class DeformableDetrFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
            " Please use DeformableDetrImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
| 172
|
"""simple docstring"""
import gc
import random
import unittest
import numpy as np
import torch
from diffusers import DDIMScheduler, KandinskyV22Pipeline, KandinskyV22PriorPipeline, UNet2DConditionModel, VQModel
from diffusers.utils import floats_tensor, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class KandinskyV22PipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = KandinskyV22Pipeline
    params = [
        "image_embeds",
        "negative_image_embeds",
    ]
    batch_params = ["image_embeds", "negative_image_embeds"]
    required_optional_params = [
        "generator",
        "height",
        "width",
        "latents",
        "guidance_scale",
        "num_inference_steps",
        "return_dict",
        "guidance_scale",
        "num_images_per_prompt",
        "output_type",
        "return_dict",
    ]
    test_xformers_attention = False
    @property
    def text_embedder_hidden_size(self):
        return 32
    @property
    def time_input_dim(self):
        return 32
    @property
    def block_out_channels_0(self):
        return self.time_input_dim
    @property
    def time_embed_dim(self):
        return self.time_input_dim * 4
    @property
    def cross_attention_dim(self):
        return 100
    @property
    def dummy_unet(self):
        torch.manual_seed(0)
        model_kwargs = {
            "in_channels": 4,
            # Out channels is double in channels because predicts mean and variance
            "out_channels": 8,
            "addition_embed_type": "image",
            "down_block_types": ("ResnetDownsampleBlock2D", "SimpleCrossAttnDownBlock2D"),
            "up_block_types": ("SimpleCrossAttnUpBlock2D", "ResnetUpsampleBlock2D"),
            "mid_block_type": "UNetMidBlock2DSimpleCrossAttn",
            "block_out_channels": (self.block_out_channels_0, self.block_out_channels_0 * 2),
            "layers_per_block": 1,
            "encoder_hid_dim": self.text_embedder_hidden_size,
            "encoder_hid_dim_type": "image_proj",
            "cross_attention_dim": self.cross_attention_dim,
            "attention_head_dim": 4,
            "resnet_time_scale_shift": "scale_shift",
            "class_embed_type": None,
        }
        model = UNet2DConditionModel(**model_kwargs)
        return model
    @property
    def dummy_movq_kwargs(self):
        return {
            "block_out_channels": [32, 64],
            "down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"],
            "in_channels": 3,
            "latent_channels": 4,
            "layers_per_block": 1,
            "norm_num_groups": 8,
            "norm_type": "spatial",
            "num_vq_embeddings": 12,
            "out_channels": 3,
            "up_block_types": [
                "AttnUpDecoderBlock2D",
                "UpDecoderBlock2D",
            ],
            "vq_embed_dim": 4,
        }
    @property
    def dummy_movq(self):
        torch.manual_seed(0)
        model = VQModel(**self.dummy_movq_kwargs)
        return model
    def get_dummy_components(self):
        unet = self.dummy_unet
        movq = self.dummy_movq
        scheduler = DDIMScheduler(
            num_train_timesteps=1000,
            beta_schedule="linear",
            beta_start=0.00085,
            beta_end=0.012,
            clip_sample=False,
            set_alpha_to_one=False,
            steps_offset=1,
            prediction_type="epsilon",
            thresholding=False,
        )
        components = {
            "unet": unet,
            "scheduler": scheduler,
            "movq": movq,
        }
        return components
    def get_dummy_inputs(self, device, seed=0):
        image_embeds = floats_tensor((1, self.text_embedder_hidden_size), rng=random.Random(seed)).to(device)
        negative_image_embeds = floats_tensor((1, self.text_embedder_hidden_size), rng=random.Random(seed + 1)).to(
            device
        )
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "image_embeds": image_embeds,
            "negative_image_embeds": negative_image_embeds,
            "generator": generator,
            "height": 64,
            "width": 64,
            "guidance_scale": 4.0,
            "num_inference_steps": 2,
            "output_type": "np",
        }
        return inputs
    def test_kandinsky(self):
        device = "cpu"
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)
        output = pipe(**self.get_dummy_inputs(device))
        image = output.images
        image_from_tuple = pipe(
            **self.get_dummy_inputs(device),
            return_dict=False,
        )[0]
        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]
        assert image.shape == (1, 64, 64, 3)
        expected_slice = np.array(
            [0.6237976, 1.0, 0.36441332, 1.0, 0.70639634, 0.29877186, 0.85652125, 0.5216843, 0.54454046]
        )
        assert (
            np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        ), f" expected_slice {expected_slice}, but got {image_slice.flatten()}"
        assert (
            np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
        ), f" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}"
@slow
@require_torch_gpu
class KandinskyV22PipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    def test_kandinsky_text2img(self):
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/kandinskyv22/kandinskyv22_text2img_cat_fp16.npy"
        )
        pipe_prior = KandinskyV22PriorPipeline.from_pretrained(
            "kandinsky-community/kandinsky-2-2-prior", torch_dtype=torch.float16
        )
        pipe_prior.to(torch_device)
        pipeline = KandinskyV22Pipeline.from_pretrained(
            "kandinsky-community/kandinsky-2-2-decoder", torch_dtype=torch.float16
        )
        pipeline = pipeline.to(torch_device)
        pipeline.set_progress_bar_config(disable=None)
        prompt = "red cat, 4k photo"
        generator = torch.Generator(device="cuda").manual_seed(0)
        image_emb, zero_image_emb = pipe_prior(
            prompt,
            generator=generator,
            num_inference_steps=5,
            negative_prompt="",
        ).to_tuple()
        generator = torch.Generator(device="cuda").manual_seed(0)
        output = pipeline(
            image_embeds=image_emb,
            negative_image_embeds=zero_image_emb,
            generator=generator,
            num_inference_steps=100,
            output_type="np",
        )
        image = output.images[0]
        assert image.shape == (512, 512, 3)
        assert_mean_pixel_difference(image, expected_image)
| 172
| 1
|
# XXX: we want transformers master here - in the absense of conftest manipulating sys.path:
# hack it in for now:
import sys
from pathlib import Path
git_repo_path = Path(__file__).resolve().parents[3] / "src"
sys.path.insert(1, str(git_repo_path))
import dataclasses # noqa
import io # noqa
import itertools # noqa
import json # noqa
import os # noqa
import unittest # noqa
from copy import deepcopy # noqa
from parameterized import parameterized # noqa
from transformers import TrainingArguments, is_torch_available # noqa
from transformers.deepspeed import is_deepspeed_available # noqa
from transformers.file_utils import WEIGHTS_NAME # noqa
from transformers.testing_utils import ( # noqa
CaptureLogger,
ExtendSysPath,
TestCasePlus,
execute_subprocess_async,
get_gpu_count,
mockenv_context,
require_deepspeed,
require_torch_gpu,
require_torch_multi_gpu,
slow,
)
from transformers.trainer_utils import set_seed # noqa
set_seed(42)
models = {"base": "patrickvonplaten/wav2vec2_tiny_random", "robust": "patrickvonplaten/wav2vec2_tiny_random_robust"}
ZERO2 = "zero2"
ZERO3 = "zero3"
stages = [ZERO2, ZERO3]
def custom_name_func(func, param_num, param):
    # customize the test name generator function as we want both params to appear in the sub-test
    # name, as by default it shows only the first param
    param_based_name = parameterized.to_safe_name("_".join(str(x) for x in param.args))
    return f"{func.__name__}_{param_based_name}"
# Cartesian-product of zero stages with models to test
params = list(itertools.product(stages, models.keys()))
@slow
@require_deepspeed
@require_torch_gpu
class TestDeepSpeedWav2Vec2(TestCasePlus):
    @parameterized.expand(params, name_func=custom_name_func)
    def test_fp32_non_distributed(self, stage, model):
        self.run_and_check(
            stage=stage,
            model=model,
            distributed=False,
            fp16=False,
        )
    @require_torch_multi_gpu
    @parameterized.expand(params, name_func=custom_name_func)
    def test_fp32_distributed(self, stage, model):
        self.run_and_check(
            stage=stage,
            model=model,
            distributed=True,
            fp16=False,
        )
    @parameterized.expand(params, name_func=custom_name_func)
    def test_fp16_non_distributed(self, stage, model):
        self.run_and_check(
            stage=stage,
            model=model,
            distributed=False,
            fp16=True,
        )
    @require_torch_multi_gpu
    @parameterized.expand(params, name_func=custom_name_func)
    def test_fp16_distributed(self, stage, model):
        self.run_and_check(
            stage=stage,
            model=model,
            distributed=True,
            fp16=True,
        )
    def do_checks(self, output_dir):
        # XXX: run_asr is premature and doesn't save any results, so all we
        # check for now is that the run completed without failing
        pass
    def run_and_check(
        self,
        stage: str,
        model: str,
        eval_steps: int = 10,
        distributed: bool = True,
        quality_checks: bool = True,
        fp16: bool = True,
    ):
        model_name = models[model]
        output_dir = self.run_trainer(
            stage=stage,
            model_name=model_name,
            eval_steps=eval_steps,
            num_train_epochs=1,
            distributed=distributed,
            fp16=fp16,
        )
        self.do_checks(output_dir)
        return output_dir
    def run_trainer(
        self,
        stage: str,
        model_name: str,
        eval_steps: int = 10,
        num_train_epochs: int = 1,
        distributed: bool = True,
        fp16: bool = True,
    ):
        output_dir = self.get_auto_remove_tmp_dir("./xxx", after=False)
        args = f"""
            --model_name_or_path {model_name}
            --dataset_name hf-internal-testing/librispeech_asr_dummy
            --dataset_config_name clean
            --train_split_name validation
            --validation_split_name validation
            --output_dir {output_dir}
            --num_train_epochs {str(num_train_epochs)}
            --per_device_train_batch_size 2
            --per_device_eval_batch_size 2
            --evaluation_strategy steps
            --learning_rate 5e-4
            --warmup_steps 8
            --orthography timit
            --preprocessing_num_workers 1
            --group_by_length
            --freeze_feature_extractor
            --report_to none
            --save_steps 0
            --eval_steps {eval_steps}
            --report_to none
        """.split()
        if fp16:
            args.extend(["--fp16"])
        # currently ds_config_wav2vec2_zero.json requires "zero_optimization.find_unused_parameters": true,
        # hence the separate config files
        ds_args = f"--deepspeed {self.test_file_dir_str}/ds_config_wav2vec2_{stage}.json".split()
        script = [f"{self.examples_dir_str}/research_projects/wav2vec2/run_asr.py"]
        launcher = self.get_launcher(distributed)
        cmd = launcher + script + args + ds_args
        # keep for quick debug
        # print(" ".join([f"\nPYTHONPATH={self.src_dir_str}"] +cmd)); die
        execute_subprocess_async(cmd, env=self.get_env())
        return output_dir
    def get_launcher(self, distributed=False):
        num_gpus = min(2, get_gpu_count()) if distributed else 1
        return f"deepspeed --num_nodes 1 --num_gpus {num_gpus}".split()
| 325
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available
_import_structure = {}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE__ = ["""BartphoTokenizer"""]
if TYPE_CHECKING:
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_bartpho import BartphoTokenizer
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
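# Usage sketch (assumption: not part of the original file). With the
# `_LazyModule` in place, importing the package itself is cheap; the heavy
# sentencepiece-backed tokenizer module is only loaded on first attribute
# access:
#
#   from transformers.models.bartpho import BartphoTokenizer  # triggers the real import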
| 325
| 1
|
"""simple docstring"""
import numpy as np
from scipy.spatial.distance import cdist
from sklearn.metrics import f1_score
import datasets
_CITATION = '\\n    @inproceedings{kakwani2020indicnlpsuite,\n    title={{IndicNLPSuite: Monolingual Corpora, Evaluation Benchmarks and Pre-trained Multilingual Language Models for Indian Languages}},\n    author={Divyanshu Kakwani and Anoop Kunchukuttan and Satish Golla and Gokul N.C. and Avik Bhattacharyya and Mitesh M. Khapra and Pratyush Kumar},\n    year={2020},\n    booktitle={Findings of EMNLP},\n}\n'
_DESCRIPTION = '\\n    IndicGLUE is a natural language understanding benchmark for Indian languages. It contains a wide\n    variety of tasks and covers 11 major Indian languages - as, bn, gu, hi, kn, ml, mr, or, pa, ta, te.\n'
_KWARGS_DESCRIPTION = '\nCompute IndicGLUE evaluation metric associated to each IndicGLUE dataset.\nArgs:\n    predictions: list of predictions to score (as int64),\n        except for \'cvit-mkb-clsr\' where each prediction is a vector (of float32).\n    references: list of ground truth labels corresponding to the predictions (as int64),\n        except for \'cvit-mkb-clsr\' where each reference is a vector (of float32).\nReturns: depending on the IndicGLUE subset, one or several of:\n    "accuracy": Accuracy\n    "f1": F1 score\n    "precision": Precision@10\nExamples:\n\n    >>> indic_glue_metric = datasets.load_metric(\'indic_glue\', \'wnli\')  # \'wnli\' or any of ["copa", "sna", "csqa", "wstp", "inltkh", "bbca", "iitp-mr", "iitp-pr", "actsa-sc", "md"]\n    >>> references = [0, 1]\n    >>> predictions = [0, 1]\n    >>> results = indic_glue_metric.compute(predictions=predictions, references=references)\n    >>> print(results)\n    {\'accuracy\': 1.0}\n\n    >>> indic_glue_metric = datasets.load_metric(\'indic_glue\', \'wiki-ner\')\n    >>> references = [0, 1]\n    >>> predictions = [0, 1]\n    >>> results = indic_glue_metric.compute(predictions=predictions, references=references)\n    >>> print(results)\n    {\'accuracy\': 1.0, \'f1\': 1.0}\n\n    >>> indic_glue_metric = datasets.load_metric(\'indic_glue\', \'cvit-mkb-clsr\')\n    >>> references = [[0.5, 0.5, 0.5], [0.1, 0.2, 0.3]]\n    >>> predictions = [[0.5, 0.5, 0.5], [0.1, 0.2, 0.3]]\n    >>> results = indic_glue_metric.compute(predictions=predictions, references=references)\n    >>> print(results)\n    {\'precision@10\': 1.0}\n\n'
def simple_accuracy(preds, labels):
    return float((preds == labels).mean())
def acc_and_f1(preds, labels):
    acc = simple_accuracy(preds, labels)
    f1 = float(f1_score(y_true=labels, y_pred=preds))
    return {
        "accuracy": acc,
        "f1": f1,
    }
def precision_at_10(en_sentvecs, in_sentvecs):
    en_sentvecs = np.array(en_sentvecs)
    in_sentvecs = np.array(in_sentvecs)
    n = en_sentvecs.shape[0]
    # mean centering
    en_sentvecs = en_sentvecs - np.mean(en_sentvecs, axis=0)
    in_sentvecs = in_sentvecs - np.mean(in_sentvecs, axis=0)
    sim = cdist(en_sentvecs, in_sentvecs, "cosine")
    actual = np.array(range(n))
    preds = sim.argsort(axis=1)[:, :10]
    matches = np.any(preds == actual[:, None], axis=1)
    return float(matches.mean())
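# Worked note (assumption, illustrative only): with perfectly aligned sentence
# vectors, row i of the cosine-distance matrix is minimized at column i, so
# index i appears in the top-10 `preds[i]` for every row and the returned
# precision@10 is 1.0, matching the docstring example above.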
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION ,_KWARGS_DESCRIPTION )
class IndicGlue(datasets.Metric):
    def _info(self):
if self.config_name not in [
"wnli",
"copa",
"sna",
"csqa",
"wstp",
"inltkh",
"bbca",
"cvit-mkb-clsr",
"iitp-mr",
"iitp-pr",
"actsa-sc",
"md",
"wiki-ner",
]:
raise KeyError(
"You should supply a configuration name selected in "
"[\"wnli\", \"copa\", \"sna\", \"csqa\", \"wstp\", \"inltkh\", \"bbca\", "
"\"cvit-mkb-clsr\", \"iitp-mr\", \"iitp-pr\", \"actsa-sc\", \"md\", "
"\"wiki-ner\"]" )
return datasets.MetricInfo(
description=_DESCRIPTION, citation=_CITATION, inputs_description=_KWARGS_DESCRIPTION, features=datasets.Features(
{
"predictions": datasets.Value("int64" )
if self.config_name != "cvit-mkb-clsr"
else datasets.Sequence(datasets.Value("float32" ) ),
"references": datasets.Value("int64" )
if self.config_name != "cvit-mkb-clsr"
else datasets.Sequence(datasets.Value("float32" ) ),
} ), codebase_urls=[], reference_urls=[], format="numpy" if self.config_name != "cvit-mkb-clsr" else None, )
    def _compute(self, predictions, references):
        if self.config_name == "cvit-mkb-clsr":
            return {"precision@10": precision_at_10(predictions, references)}
        elif self.config_name in ["wiki-ner"]:
            return acc_and_f1(predictions, references)
elif self.config_name in [
"wnli",
"copa",
"sna",
"csqa",
"wstp",
"inltkh",
"bbca",
"iitp-mr",
"iitp-pr",
"actsa-sc",
"md",
]:
return {"accuracy": simple_accuracy(UpperCAmelCase__, UpperCAmelCase__ )}
else:
raise KeyError(
"You should supply a configuration name selected in "
"[\"wnli\", \"copa\", \"sna\", \"csqa\", \"wstp\", \"inltkh\", \"bbca\", "
"\"cvit-mkb-clsr\", \"iitp-mr\", \"iitp-pr\", \"actsa-sc\", \"md\", "
"\"wiki-ner\"]" )
| 363
|
"""simple docstring"""
from __future__ import annotations
from typing import Any
class Graph:
    """An undirected weighted graph supporting Boruvka's MST algorithm."""
    def __init__(self, num_of_nodes: int) -> None:
        self.m_num_of_nodes = num_of_nodes
        self.m_edges: list[list[int]] = []
        self.m_component: dict[int, int] = {}
    def add_edge(self, u_node: int, v_node: int, weight: int) -> None:
        self.m_edges.append([u_node, v_node, weight])
    def find_component(self, u_node: int) -> int:
        if self.m_component[u_node] == u_node:
            return u_node
        return self.find_component(self.m_component[u_node])
    def set_component(self, u_node: int) -> None:
        if self.m_component[u_node] != u_node:
            for k in self.m_component:
                self.m_component[k] = self.find_component(k)
    def union(self, component_size: list[int], u_node: int, v_node: int) -> None:
        # Attach the smaller component to the larger one to form a single component.
        if component_size[u_node] <= component_size[v_node]:
            self.m_component[u_node] = v_node
            component_size[v_node] += component_size[u_node]
            self.set_component(u_node)
        elif component_size[u_node] >= component_size[v_node]:
            self.m_component[v_node] = self.find_component(u_node)
            component_size[u_node] += component_size[v_node]
            self.set_component(v_node)
    def boruvka(self) -> None:
        component_size = []
        mst_weight = 0
        minimum_weight_edge: list[Any] = [-1] * self.m_num_of_nodes
        # A list of components (initialized to all of the nodes)
        for node in range(self.m_num_of_nodes):
            self.m_component.update({node: node})
            component_size.append(1)
        num_of_components = self.m_num_of_nodes
        while num_of_components > 1:
            for edge in self.m_edges:
                u, v, w = edge
                u_component = self.m_component[u]
                v_component = self.m_component[v]
                if u_component != v_component:
                    # Record this edge as a candidate if either endpoint's
                    # component has no minimum edge yet or a heavier one.
                    for component in (u_component, v_component):
                        if (
                            minimum_weight_edge[component] == -1
                            or minimum_weight_edge[component][2] > w
                        ):
                            minimum_weight_edge[component] = [u, v, w]
            for edge in minimum_weight_edge:
                if isinstance(edge, list):
                    u, v, w = edge
                    u_component = self.m_component[u]
                    v_component = self.m_component[v]
                    if u_component != v_component:
                        mst_weight += w
                        self.union(component_size, u_component, v_component)
                        print(f"""Added edge [{u} - {v}]\nAdded weight: {w}\n""")
                        num_of_components -= 1
            minimum_weight_edge = [-1] * self.m_num_of_nodes
        print(f"""The total weight of the minimal spanning tree is: {mst_weight}""")
def _A ( ) -> None:
'''simple docstring'''
if __name__ == "__main__":
import doctest
doctest.testmod()
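

# Editor's addition: a small worked example for the Graph class above; the edge
# weights are illustrative. For this graph, Boruvka's algorithm selects the edges
# 2-3 (weight 4), 0-3 (weight 5) and 0-1 (weight 10), a total MST weight of 19.
def _boruvka_demo() -> None:
    g = Graph(4)
    for u, v, w in ((0, 1, 10), (0, 2, 6), (0, 3, 5), (1, 3, 15), (2, 3, 4)):
        g.add_edge(u, v, w)
    g.boruvka()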
| 144
| 0
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
'configuration_distilbert': [
'DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP',
'DistilBertConfig',
'DistilBertOnnxConfig',
],
'tokenization_distilbert': ['DistilBertTokenizer'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['tokenization_distilbert_fast'] = ['DistilBertTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_distilbert'] = [
'DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST',
'DistilBertForMaskedLM',
'DistilBertForMultipleChoice',
'DistilBertForQuestionAnswering',
'DistilBertForSequenceClassification',
'DistilBertForTokenClassification',
'DistilBertModel',
'DistilBertPreTrainedModel',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_tf_distilbert'] = [
'TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFDistilBertForMaskedLM',
'TFDistilBertForMultipleChoice',
'TFDistilBertForQuestionAnswering',
'TFDistilBertForSequenceClassification',
'TFDistilBertForTokenClassification',
'TFDistilBertMainLayer',
'TFDistilBertModel',
'TFDistilBertPreTrainedModel',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_flax_distilbert'] = [
'FlaxDistilBertForMaskedLM',
'FlaxDistilBertForMultipleChoice',
'FlaxDistilBertForQuestionAnswering',
'FlaxDistilBertForSequenceClassification',
'FlaxDistilBertForTokenClassification',
'FlaxDistilBertModel',
'FlaxDistilBertPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_distilbert import (
DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
DistilBertConfig,
DistilBertOnnxConfig,
)
from .tokenization_distilbert import DistilBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_distilbert_fast import DistilBertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_distilbert import (
DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
DistilBertForMaskedLM,
DistilBertForMultipleChoice,
DistilBertForQuestionAnswering,
DistilBertForSequenceClassification,
DistilBertForTokenClassification,
DistilBertModel,
DistilBertPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_distilbert import (
TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFDistilBertForMaskedLM,
TFDistilBertForMultipleChoice,
TFDistilBertForQuestionAnswering,
TFDistilBertForSequenceClassification,
TFDistilBertForTokenClassification,
TFDistilBertMainLayer,
TFDistilBertModel,
TFDistilBertPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_distilbert import (
FlaxDistilBertForMaskedLM,
FlaxDistilBertForMultipleChoice,
FlaxDistilBertForQuestionAnswering,
FlaxDistilBertForSequenceClassification,
FlaxDistilBertForTokenClassification,
FlaxDistilBertModel,
FlaxDistilBertPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
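

# Editor's addition: the module above follows the standard transformers lazy-import
# layout. `_import_structure` declares the exports, the heavy imports only run under
# TYPE_CHECKING, and at runtime the module object is swapped for a _LazyModule that
# resolves attributes on first access. A simplified sketch of that idea (a stand-in,
# not the real _LazyModule, which also handles relative imports and module specs):
class _LazyModuleSketch:
    def __init__(self, import_structure):
        self._import_structure = import_structure

    def __getattr__(self, name):
        import importlib

        for submodule, exports in self._import_structure.items():
            if name in exports:
                return getattr(importlib.import_module(submodule), name)
        raise AttributeError(name)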
| 249
|
"""simple docstring"""
from __future__ import annotations
from typing import Dict
from ...configuration_utils import PretrainedConfig
a_ = {
'susnato/ernie-m-base_pytorch': 'https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/config.json',
'susnato/ernie-m-large_pytorch': 'https://huggingface.co/susnato/ernie-m-large_pytorch/blob/main/config.json',
}
class ErnieMConfig(PretrainedConfig):
    model_type = "ernie_m"
    attribute_map: Dict[str, str] = {"dropout": "classifier_dropout", "num_classes": "num_labels"}

    def __init__(
        self,
        vocab_size: int = 250_002,
        hidden_size: int = 768,
        num_hidden_layers: int = 12,
        num_attention_heads: int = 12,
        intermediate_size: int = 3072,
        hidden_act: str = "gelu",
        hidden_dropout_prob: float = 0.1,
        attention_probs_dropout_prob: float = 0.1,
        max_position_embeddings: int = 514,
        initializer_range: float = 0.02,
        pad_token_id: int = 1,
        layer_norm_eps: float = 1e-05,
        classifier_dropout=None,
        is_decoder=False,
        act_dropout=0.0,
        **kwargs,
    ) -> None:
        super().__init__(pad_token_id=pad_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.classifier_dropout = classifier_dropout
        self.is_decoder = is_decoder
        self.act_dropout = act_dropout
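

# Editor's addition: instantiating the config above; a couple of fields are
# overridden with illustrative values while the rest keep their defaults.
def _ernie_m_config_demo():
    config = ErnieMConfig(num_hidden_layers=6, classifier_dropout=0.1)
    print(config.hidden_size, config.num_hidden_layers)  # 768 6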
| 249
| 1
|
"""simple docstring"""
def molarity_to_normality(nfactor: int, moles: float, volume: float) -> float:
    """Normality = molarity * n-factor, with molarity = moles / volume."""
    return round(float(moles / volume) * nfactor)


def moles_to_pressure(volume: float, moles: float, temperature: float) -> float:
    """Ideal gas law solved for pressure: P = nRT / V (R = 0.0821 L*atm/(mol*K))."""
    return round(float((moles * 0.0821 * temperature) / (volume)))


def moles_to_volume(pressure: float, moles: float, temperature: float) -> float:
    """Ideal gas law solved for volume: V = nRT / P."""
    return round(float((moles * 0.0821 * temperature) / (pressure)))


def pressure_and_volume_to_temperature(pressure: float, moles: float, volume: float) -> float:
    """Ideal gas law solved for temperature: T = PV / nR."""
    return round(float((pressure * volume) / (0.0821 * moles)))
if __name__ == "__main__":
import doctest
doctest.testmod()
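

# Editor's addition: the three gas-law helpers above are the ideal gas law PV = nRT
# rearranged, with R = 0.0821 L*atm/(mol*K). A quick check with illustrative values:
def _ideal_gas_demo():
    # 2 mol at 300 K in a 10 L vessel: P = nRT / V = 2 * 0.0821 * 300 / 10 ~ 4.93 atm
    print(moles_to_pressure(volume=10, moles=2, temperature=300))  # 5 (rounded)
    # Back out the temperature from that pressure and volume: T = PV / (nR)
    print(pressure_and_volume_to_temperature(pressure=5, moles=2, volume=10))  # 305 (rounded)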
| 354
|
"""simple docstring"""
from .glue import glue_convert_examples_to_features, glue_output_modes, glue_processors, glue_tasks_num_labels
from .squad import SquadExample, SquadFeatures, SquadV1Processor, SquadV2Processor, squad_convert_examples_to_features
from .utils import DataProcessor, InputExample, InputFeatures, SingleSentenceClassificationProcessor
from .xnli import xnli_output_modes, xnli_processors, xnli_tasks_num_labels
| 182
| 0
|
"""simple docstring"""
from typing import Any
def mode(input_list: list) -> list[Any]:
    """Return the mode(s) of ``input_list``, sorted; an empty list has no mode."""
    if not input_list:
        return []
    result = [input_list.count(value) for value in input_list]
    y = max(result)  # Gets the maximum count in the input list.
    # Gets values of modes
    return sorted({input_list[i] for i, value in enumerate(result) if value == y})
if __name__ == "__main__":
import doctest
doctest.testmod()
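

# Editor's addition: quick checks for mode() above; a multimodal input returns every
# most-frequent value, in sorted order.
def _mode_demo():
    print(mode([2, 2, 3, 4, 4, 4]))  # [4]
    print(mode([1, 1, 2, 2, 3]))     # [1, 2]
    print(mode([]))                  # []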
| 332
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import _LazyModule
_import_structure = {'processing_wav2vec2_with_lm': ['Wav2Vec2ProcessorWithLM']}
if TYPE_CHECKING:
    from .processing_wav2vec2_with_lm import Wav2Vec2ProcessorWithLM
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 332
| 1
|
from __future__ import annotations
import unittest
from transformers import XGLMConfig, XGLMTokenizer, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers.models.xglm.modeling_tf_xglm import (
TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXGLMForCausalLM,
TFXGLMModel,
)
@require_tf
class A:
'''simple docstring'''
UpperCamelCase = XGLMConfig
UpperCamelCase = {}
UpperCamelCase = '''gelu'''
def __init__( self : Union[str, Any] , A_ : int , A_ : Any=14 , A_ : Optional[Any]=7 , A_ : int=True , A_ : List[Any]=True , A_ : Optional[int]=True , A_ : Dict=99 , A_ : int=32 , A_ : Tuple=2 , A_ : List[Any]=4 , A_ : str=37 , A_ : Dict="gelu" , A_ : Optional[int]=0.1 , A_ : int=0.1 , A_ : str=512 , A_ : Dict=0.02 , ) -> Optional[Any]:
"""simple docstring"""
lowerCamelCase_ = parent
lowerCamelCase_ = batch_size
lowerCamelCase_ = seq_length
lowerCamelCase_ = is_training
lowerCamelCase_ = use_input_mask
lowerCamelCase_ = use_labels
lowerCamelCase_ = vocab_size
lowerCamelCase_ = d_model
lowerCamelCase_ = num_hidden_layers
lowerCamelCase_ = num_attention_heads
lowerCamelCase_ = ffn_dim
lowerCamelCase_ = activation_function
lowerCamelCase_ = activation_dropout
lowerCamelCase_ = attention_dropout
lowerCamelCase_ = max_position_embeddings
lowerCamelCase_ = initializer_range
lowerCamelCase_ = None
lowerCamelCase_ = 0
lowerCamelCase_ = 2
lowerCamelCase_ = 1
def a__ ( self : List[Any] ) -> int:
"""simple docstring"""
return XGLMConfig.from_pretrained('facebook/xglm-564M' )
def a__ ( self : Dict ) -> List[str]:
"""simple docstring"""
lowerCamelCase_ = tf.clip_by_value(
ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) , clip_value_min=0 , clip_value_max=3 )
lowerCamelCase_ = None
if self.use_input_mask:
lowerCamelCase_ = random_attention_mask([self.batch_size, self.seq_length] )
lowerCamelCase_ = self.get_config()
lowerCamelCase_ = floats_tensor([self.num_hidden_layers, self.num_attention_heads] , 2 )
return (
config,
input_ids,
input_mask,
head_mask,
)
def a__ ( self : int ) -> int:
"""simple docstring"""
return XGLMConfig(
vocab_size=self.vocab_size , d_model=self.hidden_size , num_layers=self.num_hidden_layers , attention_heads=self.num_attention_heads , ffn_dim=self.ffn_dim , activation_function=self.activation_function , activation_dropout=self.activation_dropout , attention_dropout=self.attention_dropout , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , use_cache=A_ , bos_token_id=self.bos_token_id , eos_token_id=self.eos_token_id , pad_token_id=self.pad_token_id , return_dict=A_ , )
def a__ ( self : List[str] ) -> Dict:
"""simple docstring"""
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            input_mask,
            head_mask,
        ) = config_and_inputs
        inputs_dict = {
'input_ids': input_ids,
'head_mask': head_mask,
}
return config, inputs_dict
@require_tf
class A( UpperCamelCase , UpperCamelCase , unittest.TestCase ):
'''simple docstring'''
UpperCamelCase = (TFXGLMModel, TFXGLMForCausalLM) if is_tf_available() else ()
UpperCamelCase = (TFXGLMForCausalLM,) if is_tf_available() else ()
UpperCamelCase = (
{'''feature-extraction''': TFXGLMModel, '''text-generation''': TFXGLMForCausalLM} if is_tf_available() else {}
)
UpperCamelCase = False
UpperCamelCase = False
UpperCamelCase = False
def a__ ( self : Optional[int] ) -> Optional[Any]:
"""simple docstring"""
lowerCamelCase_ = TFXGLMModelTester(self )
lowerCamelCase_ = ConfigTester(self , config_class=A_ , n_embd=37 )
def a__ ( self : Optional[Any] ) -> Dict:
"""simple docstring"""
self.config_tester.run_common_tests()
@slow
def a__ ( self : Optional[Any] ) -> Union[str, Any]:
"""simple docstring"""
for model_name in TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowerCamelCase_ = TFXGLMModel.from_pretrained(A_ )
self.assertIsNotNone(A_ )
@unittest.skip(reason='Currently, model embeddings are going to undergo a major refactor.' )
def a__ ( self : int ) -> Any:
"""simple docstring"""
super().test_resize_token_embeddings()
@require_tf
class A( unittest.TestCase ):
'''simple docstring'''
@slow
def a__ ( self : int , A_ : Dict=True ) -> Any:
"""simple docstring"""
lowerCamelCase_ = TFXGLMForCausalLM.from_pretrained('facebook/xglm-564M' )
        lowerCamelCase_ = tf.convert_to_tensor([[2, 268, 9865]] , dtype=tf.int32 )  # The dog
# </s> The dog is a very friendly dog. He is very affectionate and loves to play with other
# fmt: off
lowerCamelCase_ = [2, 268, 9865, 67, 11, 1988, 57252, 9865, 5, 984, 67, 1988, 213838, 1658, 53, 70446, 33, 6657, 278, 1581]
# fmt: on
lowerCamelCase_ = model.generate(A_ , do_sample=A_ , num_beams=1 )
if verify_outputs:
self.assertListEqual(output_ids[0].numpy().tolist() , A_ )
@slow
def a__ ( self : Optional[Any] ) -> Optional[Any]:
"""simple docstring"""
lowerCamelCase_ = XGLMTokenizer.from_pretrained('facebook/xglm-564M' )
lowerCamelCase_ = TFXGLMForCausalLM.from_pretrained('facebook/xglm-564M' )
tf.random.set_seed(0 )
lowerCamelCase_ = tokenizer('Today is a nice day and' , return_tensors='tf' )
lowerCamelCase_ = tokenized.input_ids
# forces the generation to happen on CPU, to avoid GPU-related quirks (and assure same output regardless of the available devices)
with tf.device(':/CPU:0' ):
lowerCamelCase_ = model.generate(A_ , do_sample=A_ , seed=[7, 0] )
lowerCamelCase_ = tokenizer.decode(output_ids[0] , skip_special_tokens=A_ )
lowerCamelCase_ = (
'Today is a nice day and warm evening here over Southern Alberta!! Today when they closed schools due'
)
self.assertEqual(A_ , A_ )
@slow
def a__ ( self : Dict ) -> str:
"""simple docstring"""
lowerCamelCase_ = TFXGLMForCausalLM.from_pretrained('facebook/xglm-564M' )
lowerCamelCase_ = XGLMTokenizer.from_pretrained('facebook/xglm-564M' )
lowerCamelCase_ = 'left'
# use different length sentences to test batching
lowerCamelCase_ = [
'This is an extremelly long sentence that only exists to test the ability of the model to cope with '
'left-padding, such as in batched generation. The output for the sequence below should be the same '
'regardless of whether left padding is applied or not. When',
'Hello, my dog is a little',
]
lowerCamelCase_ = tokenizer(A_ , return_tensors='tf' , padding=A_ )
lowerCamelCase_ = inputs['input_ids']
lowerCamelCase_ = model.generate(input_ids=A_ , attention_mask=inputs['attention_mask'] , max_new_tokens=12 )
lowerCamelCase_ = tokenizer(sentences[0] , return_tensors='tf' ).input_ids
lowerCamelCase_ = model.generate(input_ids=A_ , max_new_tokens=12 )
lowerCamelCase_ = tokenizer(sentences[1] , return_tensors='tf' ).input_ids
lowerCamelCase_ = model.generate(input_ids=A_ , max_new_tokens=12 )
lowerCamelCase_ = tokenizer.batch_decode(A_ , skip_special_tokens=A_ )
lowerCamelCase_ = tokenizer.decode(output_non_padded[0] , skip_special_tokens=A_ )
lowerCamelCase_ = tokenizer.decode(output_padded[0] , skip_special_tokens=A_ )
lowerCamelCase_ = [
'This is an extremelly long sentence that only exists to test the ability of the model to cope with '
'left-padding, such as in batched generation. The output for the sequence below should be the same '
'regardless of whether left padding is applied or not. When left padding is applied, the sequence will be '
'a single',
'Hello, my dog is a little bit of a shy one, but he is very friendly',
]
self.assertListEqual(A_ , A_ )
self.assertListEqual(A_ , [non_padded_sentence, padded_sentence] )
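

# Editor's addition: the batching test above depends on left padding because XGLM is
# decoder-only; with right padding the model would condition on pad tokens while
# generating. A minimal sketch of that setup (same checkpoint as the tests above;
# running it requires TensorFlow and downloads the weights):
def _xglm_left_padding_demo():
    tokenizer = XGLMTokenizer.from_pretrained('facebook/xglm-564M')
    model = TFXGLMForCausalLM.from_pretrained('facebook/xglm-564M')
    tokenizer.padding_side = 'left'  # pad on the left so generation continues from real tokens
    inputs = tokenizer(['Hello, my dog is a little', 'Hi'], return_tensors='tf', padding=True)
    outputs = model.generate(
        input_ids=inputs['input_ids'], attention_mask=inputs['attention_mask'], max_new_tokens=8
    )
    print(tokenizer.batch_decode(outputs, skip_special_tokens=True))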
| 350
|
import warnings
from typing import List
import numpy as np
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
from ...utils import is_flax_available, is_tf_available, is_torch_available
class A( UpperCamelCase ):
'''simple docstring'''
UpperCamelCase = ['''image_processor''', '''tokenizer''']
UpperCamelCase = '''OwlViTImageProcessor'''
UpperCamelCase = ('''CLIPTokenizer''', '''CLIPTokenizerFast''')
    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                'The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`'
                ' instead.',
                FutureWarning,
            )
            feature_extractor = kwargs.pop('feature_extractor')
        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError('You need to specify an `image_processor`.')
        if tokenizer is None:
            raise ValueError('You need to specify a `tokenizer`.')
        super().__init__(image_processor, tokenizer)
    def __call__(self, text=None, images=None, query_images=None, padding="max_length", return_tensors="np", **kwargs):
        if text is None and query_images is None and images is None:
            raise ValueError(
                'You have to specify at least one text or query image or image. All three cannot be none.')
        if text is not None:
            if isinstance(text, str) or (isinstance(text, List) and not isinstance(text[0], List)):
                encodings = [self.tokenizer(text, padding=padding, return_tensors=return_tensors, **kwargs)]
            elif isinstance(text, List) and isinstance(text[0], List):
                encodings = []
                # Maximum number of queries across batch
                max_num_queries = max([len(t) for t in text])
                # Pad all batch samples to max number of text queries
                for t in text:
                    if len(t) != max_num_queries:
                        t = t + [' '] * (max_num_queries - len(t))
                    encoding = self.tokenizer(t, padding=padding, return_tensors=return_tensors, **kwargs)
                    encodings.append(encoding)
            else:
                raise TypeError('Input text should be a string, a list of strings or a nested list of strings')
            if return_tensors == "np":
                input_ids = np.concatenate([encoding['input_ids'] for encoding in encodings], axis=0)
                attention_mask = np.concatenate([encoding['attention_mask'] for encoding in encodings], axis=0)
            elif return_tensors == "jax" and is_flax_available():
                import jax.numpy as jnp

                input_ids = jnp.concatenate([encoding['input_ids'] for encoding in encodings], axis=0)
                attention_mask = jnp.concatenate([encoding['attention_mask'] for encoding in encodings], axis=0)
            elif return_tensors == "pt" and is_torch_available():
                import torch

                input_ids = torch.cat([encoding['input_ids'] for encoding in encodings], dim=0)
                attention_mask = torch.cat([encoding['attention_mask'] for encoding in encodings], dim=0)
            elif return_tensors == "tf" and is_tf_available():
                import tensorflow as tf

                input_ids = tf.stack([encoding['input_ids'] for encoding in encodings], axis=0)
                attention_mask = tf.stack([encoding['attention_mask'] for encoding in encodings], axis=0)
            else:
                raise ValueError('Target return tensor type could not be returned')
            encoding = BatchEncoding()
            encoding['input_ids'] = input_ids
            encoding['attention_mask'] = attention_mask
        if query_images is not None:
            encoding = BatchEncoding()
            query_pixel_values = self.image_processor(
                query_images, return_tensors=return_tensors, **kwargs).pixel_values
            encoding['query_pixel_values'] = query_pixel_values
        if images is not None:
            image_features = self.image_processor(images, return_tensors=return_tensors, **kwargs)
        if text is not None and images is not None:
            encoding['pixel_values'] = image_features.pixel_values
            return encoding
        elif query_images is not None and images is not None:
            encoding['pixel_values'] = image_features.pixel_values
            return encoding
        elif text is not None or query_images is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features), tensor_type=return_tensors)
def a__ ( self : Tuple , *A_ : Dict , **A_ : Dict ) -> Any:
"""simple docstring"""
return self.image_processor.post_process(*A_ , **A_ )
def a__ ( self : List[str] , *A_ : Any , **A_ : List[Any] ) -> Optional[int]:
"""simple docstring"""
return self.image_processor.post_process_object_detection(*A_ , **A_ )
def a__ ( self : Any , *A_ : str , **A_ : List[Any] ) -> Any:
"""simple docstring"""
return self.image_processor.post_process_image_guided_detection(*A_ , **A_ )
def a__ ( self : Union[str, Any] , *A_ : Any , **A_ : Union[str, Any] ) -> str:
"""simple docstring"""
return self.tokenizer.batch_decode(*A_ , **A_ )
def a__ ( self : Optional[int] , *A_ : List[Any] , **A_ : int ) -> int:
"""simple docstring"""
return self.tokenizer.decode(*A_ , **A_ )
@property
def a__ ( self : List[Any] ) -> List[Any]:
"""simple docstring"""
warnings.warn(
'`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.' , A_ , )
return self.image_processor_class
@property
def a__ ( self : str ) -> List[str]:
"""simple docstring"""
warnings.warn(
'`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.' , A_ , )
return self.image_processor
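

# Editor's addition: typical use of the processor above for zero-shot detection: a
# nested list of text queries per image plus the image itself. The blank test image
# is illustrative; running this downloads the checkpoint files.
def _owlvit_processor_demo():
    from PIL import Image
    from transformers import OwlViTProcessor

    processor = OwlViTProcessor.from_pretrained('google/owlvit-base-patch32')
    image = Image.new('RGB', (640, 480))
    inputs = processor(text=[['a photo of a cat', 'a photo of a dog']], images=image, return_tensors='np')
    print(inputs['input_ids'].shape, inputs['pixel_values'].shape)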
| 208
| 0
|
"""simple docstring"""
from __future__ import annotations
from collections.abc import Iterator
from typing import Generic, TypeVar
T = TypeVar("T")


class Node(Generic[T]):
    def __init__(self, data: T):
        self.data = data
        self.next: Node[T] | None = None

    def __str__(self) -> str:
        return f"{self.data}"


class LinkedStack(Generic[T]):
    """Stack backed by a singly linked list; the top of the stack is the list head."""

    def __init__(self) -> None:
        self.top: Node[T] | None = None

    def __iter__(self) -> Iterator[T]:
        node = self.top
        while node:
            yield node.data
            node = node.next

    def __str__(self) -> str:
        return "->".join([str(item) for item in self])

    def __len__(self) -> int:
        return len(tuple(iter(self)))

    def is_empty(self) -> bool:
        return self.top is None

    def push(self, item: T) -> None:
        node = Node(item)
        if not self.is_empty():
            node.next = self.top
        self.top = node

    def pop(self) -> T:
        if self.is_empty():
            raise IndexError("pop from empty stack")
        assert isinstance(self.top, Node)
        pop_node = self.top
        self.top = self.top.next
        return pop_node.data

    def peek(self) -> T:
        if self.is_empty():
            raise IndexError("peek from empty stack")
        assert self.top is not None
        return self.top.data

    def clear(self) -> None:
        self.top = None


if __name__ == "__main__":
    from doctest import testmod

    testmod()
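

# Editor's addition: LIFO behaviour of the stack above (the class name LinkedStack
# restores a name that was mangled in this copy).
def _stack_demo() -> None:
    stack = LinkedStack[int]()
    for n in (1, 2, 3):
        stack.push(n)
    print(stack)         # 3->2->1
    print(stack.pop())   # 3
    print(stack.peek())  # 2
    print(len(stack))    # 2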
| 144
|
"""simple docstring"""
from argparse import ArgumentParser
from datasets.commands.convert import ConvertCommand
from datasets.commands.dummy_data import DummyDataCommand
from datasets.commands.env import EnvironmentCommand
from datasets.commands.run_beam import RunBeamCommand
from datasets.commands.test import TestCommand
from datasets.utils.logging import set_verbosity_info
def parse_unknown_args(unknown_args):
    """Turn a flat ["--key", "value", ...] list into a {"key": "value", ...} dict."""
    return {key.lstrip("-"): value for key, value in zip(unknown_args[::2], unknown_args[1::2])}


def main():
    parser = ArgumentParser(
        "HuggingFace Datasets CLI tool", usage="datasets-cli <command> [<args>]", allow_abbrev=False
    )
    commands_parser = parser.add_subparsers(help="datasets-cli command helpers")
    set_verbosity_info()
    # Register commands
    ConvertCommand.register_subcommand(commands_parser)
    EnvironmentCommand.register_subcommand(commands_parser)
    TestCommand.register_subcommand(commands_parser)
    RunBeamCommand.register_subcommand(commands_parser)
    DummyDataCommand.register_subcommand(commands_parser)
    # Parse args
    args, unknown_args = parser.parse_known_args()
    if not hasattr(args, "func"):
        parser.print_help()
        exit(1)
    kwargs = parse_unknown_args(unknown_args)
    # Run
    service = args.func(args, **kwargs)
    service.run()
if __name__ == "__main__":
main()
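

# Editor's addition: what parse_unknown_args() above does with the leftover flags
# argparse did not recognize; the flag names here are illustrative.
def _parse_unknown_args_demo():
    print(parse_unknown_args(['--num_proc', '4', '--cache_dir', '/tmp/hf']))
    # -> {'num_proc': '4', 'cache_dir': '/tmp/hf'}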
| 173
| 0
|
import gc
import unittest
from transformers import CTRLConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
CTRL_PRETRAINED_MODEL_ARCHIVE_LIST,
CTRLForSequenceClassification,
CTRLLMHeadModel,
CTRLModel,
)
class __A :
def __init__( self : Dict , UpperCAmelCase_ : List[Any] , UpperCAmelCase_ : Optional[Any]=14 , UpperCAmelCase_ : int=7 , UpperCAmelCase_ : List[Any]=True , UpperCAmelCase_ : List[Any]=True , UpperCAmelCase_ : Union[str, Any]=True , UpperCAmelCase_ : str=True , UpperCAmelCase_ : Dict=True , UpperCAmelCase_ : Union[str, Any]=99 , UpperCAmelCase_ : Union[str, Any]=32 , UpperCAmelCase_ : List[Any]=5 , UpperCAmelCase_ : int=4 , UpperCAmelCase_ : Optional[int]=37 , UpperCAmelCase_ : Tuple="gelu" , UpperCAmelCase_ : Optional[int]=0.1 , UpperCAmelCase_ : int=0.1 , UpperCAmelCase_ : Optional[Any]=512 , UpperCAmelCase_ : Dict=16 , UpperCAmelCase_ : Any=2 , UpperCAmelCase_ : int=0.02 , UpperCAmelCase_ : Optional[Any]=3 , UpperCAmelCase_ : List[Any]=4 , UpperCAmelCase_ : int=None , ):
lowerCAmelCase : List[Any] = parent
lowerCAmelCase : str = batch_size
lowerCAmelCase : List[Any] = seq_length
lowerCAmelCase : Optional[int] = is_training
lowerCAmelCase : List[Any] = use_token_type_ids
lowerCAmelCase : Any = use_input_mask
lowerCAmelCase : List[Any] = use_labels
lowerCAmelCase : List[Any] = use_mc_token_ids
lowerCAmelCase : List[Any] = vocab_size
lowerCAmelCase : List[Any] = hidden_size
lowerCAmelCase : Dict = num_hidden_layers
lowerCAmelCase : Union[str, Any] = num_attention_heads
lowerCAmelCase : List[Any] = intermediate_size
lowerCAmelCase : str = hidden_act
lowerCAmelCase : Union[str, Any] = hidden_dropout_prob
lowerCAmelCase : List[str] = attention_probs_dropout_prob
lowerCAmelCase : Union[str, Any] = max_position_embeddings
lowerCAmelCase : List[str] = type_vocab_size
lowerCAmelCase : Optional[int] = type_sequence_label_size
lowerCAmelCase : Optional[Any] = initializer_range
lowerCAmelCase : Union[str, Any] = num_labels
lowerCAmelCase : Union[str, Any] = num_choices
lowerCAmelCase : List[str] = scope
lowerCAmelCase : List[str] = self.vocab_size - 1
def lowercase__ ( self : Optional[Any] ):
lowerCAmelCase : Optional[Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
lowerCAmelCase : Optional[int] = None
if self.use_input_mask:
lowerCAmelCase : Optional[Any] = random_attention_mask([self.batch_size, self.seq_length] )
lowerCAmelCase : Optional[int] = None
if self.use_token_type_ids:
lowerCAmelCase : Dict = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
lowerCAmelCase : List[Any] = None
if self.use_mc_token_ids:
lowerCAmelCase : str = ids_tensor([self.batch_size, self.num_choices] , self.seq_length )
lowerCAmelCase : Optional[int] = None
lowerCAmelCase : str = None
lowerCAmelCase : Optional[int] = None
if self.use_labels:
lowerCAmelCase : Union[str, Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
lowerCAmelCase : Optional[int] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
lowerCAmelCase : Union[str, Any] = ids_tensor([self.batch_size] , self.num_choices )
lowerCAmelCase : int = self.get_config()
lowerCAmelCase : Tuple = ids_tensor([self.num_hidden_layers, self.num_attention_heads] , 2 )
return (
config,
input_ids,
input_mask,
head_mask,
token_type_ids,
mc_token_ids,
sequence_labels,
token_labels,
choice_labels,
)
def lowercase__ ( self : List[str] ):
return CTRLConfig(
vocab_size=self.vocab_size , n_embd=self.hidden_size , n_layer=self.num_hidden_layers , n_head=self.num_attention_heads , n_positions=self.max_position_embeddings , pad_token_id=self.pad_token_id , )
def lowercase__ ( self : int , UpperCAmelCase_ : int , UpperCAmelCase_ : List[str] , UpperCAmelCase_ : int , UpperCAmelCase_ : Optional[Any] , UpperCAmelCase_ : int , *UpperCAmelCase_ : List[Any] ):
lowerCAmelCase : Optional[Any] = CTRLModel(config=UpperCAmelCase_ )
model.to(UpperCAmelCase_ )
model.eval()
model(UpperCAmelCase_ , token_type_ids=UpperCAmelCase_ , head_mask=UpperCAmelCase_ )
model(UpperCAmelCase_ , token_type_ids=UpperCAmelCase_ )
lowerCAmelCase : Dict = model(UpperCAmelCase_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(len(result.past_key_values ) , config.n_layer )
def lowercase__ ( self : List[Any] , UpperCAmelCase_ : str , UpperCAmelCase_ : Tuple , UpperCAmelCase_ : Tuple , UpperCAmelCase_ : Any , UpperCAmelCase_ : List[Any] , *UpperCAmelCase_ : Tuple ):
lowerCAmelCase : List[Any] = CTRLLMHeadModel(UpperCAmelCase_ )
model.to(UpperCAmelCase_ )
model.eval()
lowerCAmelCase : Optional[int] = model(UpperCAmelCase_ , token_type_ids=UpperCAmelCase_ , labels=UpperCAmelCase_ )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def lowercase__ ( self : Any ):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            input_mask,
            head_mask,
            token_type_ids,
            mc_token_ids,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'head_mask': head_mask}
return config, inputs_dict
def lowercase__ ( self : Union[str, Any] , UpperCAmelCase_ : int , UpperCAmelCase_ : Optional[int] , UpperCAmelCase_ : Optional[int] , UpperCAmelCase_ : Any , *UpperCAmelCase_ : Any ):
lowerCAmelCase : Union[str, Any] = self.num_labels
lowerCAmelCase : Union[str, Any] = CTRLForSequenceClassification(UpperCAmelCase_ )
model.to(UpperCAmelCase_ )
model.eval()
lowerCAmelCase : Any = ids_tensor([self.batch_size] , self.type_sequence_label_size )
lowerCAmelCase : Optional[Any] = model(UpperCAmelCase_ , token_type_ids=UpperCAmelCase_ , labels=UpperCAmelCase_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
@require_torch
class __A ( lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , unittest.TestCase ):
lowerCAmelCase_ : Optional[int] = (CTRLModel, CTRLLMHeadModel, CTRLForSequenceClassification) if is_torch_available() else ()
lowerCAmelCase_ : List[Any] = (CTRLLMHeadModel,) if is_torch_available() else ()
lowerCAmelCase_ : str = (
{
"feature-extraction": CTRLModel,
"text-classification": CTRLForSequenceClassification,
"text-generation": CTRLLMHeadModel,
"zero-shot": CTRLForSequenceClassification,
}
if is_torch_available()
else {}
)
lowerCAmelCase_ : Dict = True
lowerCAmelCase_ : Tuple = False
lowerCAmelCase_ : str = False
def lowercase__ ( self : List[Any] , UpperCAmelCase_ : Dict , UpperCAmelCase_ : List[Any] , UpperCAmelCase_ : Any , UpperCAmelCase_ : Dict , UpperCAmelCase_ : Dict ):
if pipeline_test_casse_name == "ZeroShotClassificationPipelineTests":
# Get `tokenizer does not have a padding token` error for both fast/slow tokenizers.
# `CTRLConfig` was never used in pipeline tests, either because of a missing checkpoint or because a tiny
# config could not be created.
return True
return False
def lowercase__ ( self : Tuple ):
lowerCAmelCase : Union[str, Any] = CTRLModelTester(self )
lowerCAmelCase : str = ConfigTester(self , config_class=UpperCAmelCase_ , n_embd=37 )
def lowercase__ ( self : Tuple ):
super().tearDown()
# clean-up as much as possible GPU memory occupied by PyTorch
gc.collect()
torch.cuda.empty_cache()
def lowercase__ ( self : Optional[int] ):
self.config_tester.run_common_tests()
def lowercase__ ( self : List[str] ):
lowerCAmelCase : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_ctrl_model(*UpperCAmelCase_ )
def lowercase__ ( self : List[Any] ):
lowerCAmelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_lm_head_model(*UpperCAmelCase_ )
@unittest.skip('Will be fixed soon by reducing the size of the model used for common tests.' )
def lowercase__ ( self : Tuple ):
pass
@slow
def lowercase__ ( self : List[str] ):
for model_name in CTRL_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowerCAmelCase : int = CTRLModel.from_pretrained(UpperCAmelCase_ )
self.assertIsNotNone(UpperCAmelCase_ )
@unittest.skip('The model doesn\'t support left padding' ) # and it's not used enough to be worth fixing :)
def lowercase__ ( self : List[str] ):
pass
@require_torch
class __A ( unittest.TestCase ):
def lowercase__ ( self : Dict ):
super().tearDown()
# clean-up as much as possible GPU memory occupied by PyTorch
gc.collect()
torch.cuda.empty_cache()
@slow
def lowercase__ ( self : Any ):
lowerCAmelCase : str = CTRLLMHeadModel.from_pretrained('ctrl' )
model.to(UpperCAmelCase_ )
lowerCAmelCase : List[str] = torch.tensor(
[[11859, 0, 1611, 8]] , dtype=torch.long , device=UpperCAmelCase_ ) # Legal the president is
lowerCAmelCase : Dict = [
11859,
0,
1611,
8,
5,
150,
26449,
2,
19,
348,
469,
3,
2595,
48,
20740,
246533,
246533,
19,
30,
5,
] # Legal the president is a good guy and I don't want to lose my job. \n \n I have a
lowerCAmelCase : int = model.generate(UpperCAmelCase_ , do_sample=UpperCAmelCase_ )
self.assertListEqual(output_ids[0].tolist() , UpperCAmelCase_ )
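

# Editor's addition: the greedy-generation check above, restated as a standalone
# sketch. The prompt starts with CTRL's "Legal" control code, which steers the style
# of the continuation; running this requires PyTorch and downloads the checkpoint.
def _ctrl_generation_demo():
    from transformers import CTRLTokenizer

    tokenizer = CTRLTokenizer.from_pretrained('ctrl')
    model = CTRLLMHeadModel.from_pretrained('ctrl')
    input_ids = tokenizer('Legal the president is', return_tensors='pt').input_ids
    output_ids = model.generate(input_ids, do_sample=False, max_new_tokens=16)
    print(tokenizer.decode(output_ids[0]))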
| 371
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
'''configuration_xlm_roberta''': [
'''XLM_ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''XLMRobertaConfig''',
'''XLMRobertaOnnxConfig''',
],
}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_xlm_roberta"] = ["XLMRobertaTokenizer"]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_xlm_roberta_fast"] = ["XLMRobertaTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_xlm_roberta"] = [
'''XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''XLMRobertaForCausalLM''',
'''XLMRobertaForMaskedLM''',
'''XLMRobertaForMultipleChoice''',
'''XLMRobertaForQuestionAnswering''',
'''XLMRobertaForSequenceClassification''',
'''XLMRobertaForTokenClassification''',
'''XLMRobertaModel''',
'''XLMRobertaPreTrainedModel''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_xlm_roberta"] = [
'''TF_XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFXLMRobertaForCausalLM''',
'''TFXLMRobertaForMaskedLM''',
'''TFXLMRobertaForMultipleChoice''',
'''TFXLMRobertaForQuestionAnswering''',
'''TFXLMRobertaForSequenceClassification''',
'''TFXLMRobertaForTokenClassification''',
'''TFXLMRobertaModel''',
'''TFXLMRobertaPreTrainedModel''',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_xlm_roberta"] = [
'''FLAX_XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''FlaxXLMRobertaForMaskedLM''',
'''FlaxXLMRobertaForCausalLM''',
'''FlaxXLMRobertaForMultipleChoice''',
'''FlaxXLMRobertaForQuestionAnswering''',
'''FlaxXLMRobertaForSequenceClassification''',
'''FlaxXLMRobertaForTokenClassification''',
'''FlaxXLMRobertaModel''',
'''FlaxXLMRobertaPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_xlm_roberta import (
XLM_ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP,
XLMRobertaConfig,
XLMRobertaOnnxConfig,
)
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xlm_roberta import XLMRobertaTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xlm_roberta_fast import XLMRobertaTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xlm_roberta import (
XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
XLMRobertaForCausalLM,
XLMRobertaForMaskedLM,
XLMRobertaForMultipleChoice,
XLMRobertaForQuestionAnswering,
XLMRobertaForSequenceClassification,
XLMRobertaForTokenClassification,
XLMRobertaModel,
XLMRobertaPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_xlm_roberta import (
TF_XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXLMRobertaForCausalLM,
TFXLMRobertaForMaskedLM,
TFXLMRobertaForMultipleChoice,
TFXLMRobertaForQuestionAnswering,
TFXLMRobertaForSequenceClassification,
TFXLMRobertaForTokenClassification,
TFXLMRobertaModel,
TFXLMRobertaPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_xlm_roberta import (
FLAX_XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
FlaxXLMRobertaForCausalLM,
FlaxXLMRobertaForMaskedLM,
FlaxXLMRobertaForMultipleChoice,
FlaxXLMRobertaForQuestionAnswering,
FlaxXLMRobertaForSequenceClassification,
FlaxXLMRobertaForTokenClassification,
FlaxXLMRobertaModel,
FlaxXLMRobertaPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 323
| 0
|
"""simple docstring"""
import unittest
from transformers import SPIECE_UNDERLINE
from transformers.models.speechta import SpeechTaTokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from transformers.tokenization_utils import AddedToken
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir('fixtures/test_sentencepiece_bpe_char.model')
@require_sentencepiece
@require_tokenizers
class snake_case ( __lowerCamelCase , unittest.TestCase ):
"""simple docstring"""
snake_case__ = SpeechTaTokenizer
snake_case__ = False
snake_case__ = True
def __lowerCAmelCase ( self : Optional[Any] ):
super().setUp()
# We have a SentencePiece fixture for testing
        tokenizer = SpeechTaTokenizer(SAMPLE_VOCAB)
        mask_token = AddedToken('<mask>', lstrip=True, rstrip=False)
        tokenizer.mask_token = mask_token
tokenizer.add_special_tokens({'mask_token': mask_token} )
tokenizer.add_tokens(['<ctc_blank>'] )
tokenizer.save_pretrained(self.tmpdirname )
def __lowerCAmelCase ( self : List[str] ,lowerCamelCase__ : int ):
        input_text = '''this is a test'''
        output_text = '''this is a test'''
return input_text, output_text
def __lowerCAmelCase ( self : int ,lowerCamelCase__ : Tuple ,lowerCamelCase__ : List[Any]=False ,lowerCamelCase__ : List[Any]=20 ,lowerCamelCase__ : str=5 ):
UpperCAmelCase__ = self.get_input_output_texts(lowerCamelCase__ )
UpperCAmelCase__ = tokenizer.encode(lowerCamelCase__ ,add_special_tokens=lowerCamelCase__ )
UpperCAmelCase__ = tokenizer.decode(lowerCamelCase__ ,clean_up_tokenization_spaces=lowerCamelCase__ )
return text, ids
def __lowerCAmelCase ( self : Dict ):
UpperCAmelCase__ = '''<pad>'''
UpperCAmelCase__ = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(lowerCamelCase__ ) ,lowerCamelCase__ )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(lowerCamelCase__ ) ,lowerCamelCase__ )
def __lowerCAmelCase ( self : List[Any] ):
UpperCAmelCase__ = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] ,'<s>' )
self.assertEqual(vocab_keys[1] ,'<pad>' )
self.assertEqual(vocab_keys[-4] ,'œ' )
self.assertEqual(vocab_keys[-2] ,'<mask>' )
self.assertEqual(vocab_keys[-1] ,'<ctc_blank>' )
self.assertEqual(len(lowerCamelCase__ ) ,81 )
def __lowerCAmelCase ( self : str ):
self.assertEqual(self.get_tokenizer().vocab_size ,79 )
def __lowerCAmelCase ( self : Optional[Any] ):
        tokenizers = self.get_tokenizers(do_lower_case=False)
for tokenizer in tokenizers:
with self.subTest(f'''{tokenizer.__class__.__name__}''' ):
UpperCAmelCase__ = tokenizer.vocab_size
UpperCAmelCase__ = len(lowerCamelCase__ )
self.assertNotEqual(lowerCamelCase__ ,0 )
# We usually have added tokens from the start in tests because our vocab fixtures are
# smaller than the original vocabs - let's not assert this
# self.assertEqual(vocab_size, all_size)
UpperCAmelCase__ = ['''aaaaa bbbbbb''', '''cccccccccdddddddd''']
UpperCAmelCase__ = tokenizer.add_tokens(lowerCamelCase__ )
UpperCAmelCase__ = tokenizer.vocab_size
UpperCAmelCase__ = len(lowerCamelCase__ )
self.assertNotEqual(lowerCamelCase__ ,0 )
self.assertEqual(lowerCamelCase__ ,lowerCamelCase__ )
self.assertEqual(lowerCamelCase__ ,len(lowerCamelCase__ ) )
self.assertEqual(lowerCamelCase__ ,all_size + len(lowerCamelCase__ ) )
UpperCAmelCase__ = tokenizer.encode('aaaaa bbbbbb low cccccccccdddddddd l' ,add_special_tokens=lowerCamelCase__ )
self.assertGreaterEqual(len(lowerCamelCase__ ) ,4 )
self.assertGreater(tokens[0] ,tokenizer.vocab_size - 1 )
self.assertGreater(tokens[-3] ,tokenizer.vocab_size - 1 )
UpperCAmelCase__ = {'''eos_token''': '''>>>>|||<||<<|<<''', '''pad_token''': '''<<<<<|||>|>>>>|>'''}
UpperCAmelCase__ = tokenizer.add_special_tokens(lowerCamelCase__ )
UpperCAmelCase__ = tokenizer.vocab_size
UpperCAmelCase__ = len(lowerCamelCase__ )
self.assertNotEqual(lowerCamelCase__ ,0 )
self.assertEqual(lowerCamelCase__ ,lowerCamelCase__ )
self.assertEqual(lowerCamelCase__ ,len(lowerCamelCase__ ) )
self.assertEqual(lowerCamelCase__ ,all_size_a + len(lowerCamelCase__ ) )
UpperCAmelCase__ = tokenizer.encode(
'>>>>|||<||<<|<< aaaaabbbbbb low cccccccccdddddddd <<<<<|||>|>>>>|> l' ,add_special_tokens=lowerCamelCase__ )
self.assertGreaterEqual(len(lowerCamelCase__ ) ,6 )
self.assertGreater(tokens[0] ,tokenizer.vocab_size - 1 )
self.assertGreater(tokens[0] ,tokens[1] )
self.assertGreater(tokens[-3] ,tokenizer.vocab_size - 1 )
self.assertGreater(tokens[-3] ,tokens[-4] )
self.assertEqual(tokens[0] ,tokenizer.eos_token_id )
self.assertEqual(tokens[-3] ,tokenizer.pad_token_id )
def __lowerCAmelCase ( self : int ):
pass
def __lowerCAmelCase ( self : List[Any] ):
pass
def __lowerCAmelCase ( self : Optional[int] ):
UpperCAmelCase__ = self.get_tokenizer()
UpperCAmelCase__ = tokenizer.tokenize('This is a test' )
# fmt: off
self.assertListEqual(lowerCamelCase__ ,[SPIECE_UNDERLINE, 'T', 'h', 'i', 's', SPIECE_UNDERLINE, 'i', 's', SPIECE_UNDERLINE, 'a', SPIECE_UNDERLINE, 't', 'e', 's', 't'] )
# fmt: on
self.assertListEqual(
tokenizer.convert_tokens_to_ids(lowerCamelCase__ ) ,[4, 32, 11, 10, 12, 4, 10, 12, 4, 7, 4, 6, 5, 12, 6] ,)
UpperCAmelCase__ = tokenizer.tokenize('I was born in 92000, and this is falsé.' )
self.assertListEqual(
lowerCamelCase__ ,[SPIECE_UNDERLINE, 'I', SPIECE_UNDERLINE, 'w', 'a', 's', SPIECE_UNDERLINE, 'b', 'o', 'r', 'n', SPIECE_UNDERLINE, 'i', 'n', SPIECE_UNDERLINE, '92000', ',', SPIECE_UNDERLINE, 'a', 'n', 'd', SPIECE_UNDERLINE, 't', 'h', 'i', 's', SPIECE_UNDERLINE, 'i', 's', SPIECE_UNDERLINE, 'f', 'a', 'l', 's', 'é', '.'] )
UpperCAmelCase__ = tokenizer.convert_tokens_to_ids(lowerCamelCase__ )
# fmt: off
self.assertListEqual(lowerCamelCase__ ,[4, 30, 4, 20, 7, 12, 4, 25, 8, 13, 9, 4, 10, 9, 4, 3, 23, 4, 7, 9, 14, 4, 6, 11, 10, 12, 4, 10, 12, 4, 19, 7, 15, 12, 73, 26] )
# fmt: on
UpperCAmelCase__ = tokenizer.convert_ids_to_tokens(lowerCamelCase__ )
self.assertListEqual(
lowerCamelCase__ ,[SPIECE_UNDERLINE, 'I', SPIECE_UNDERLINE, 'w', 'a', 's', SPIECE_UNDERLINE, 'b', 'o', 'r', 'n', SPIECE_UNDERLINE, 'i', 'n', SPIECE_UNDERLINE, '<unk>', ',', SPIECE_UNDERLINE, 'a', 'n', 'd', SPIECE_UNDERLINE, 't', 'h', 'i', 's', SPIECE_UNDERLINE, 'i', 's', SPIECE_UNDERLINE, 'f', 'a', 'l', 's', 'é', '.'] )
@slow
def __lowerCAmelCase ( self : Union[str, Any] ):
UpperCAmelCase__ = [
'''Transformers (formerly known as pytorch-transformers and pytorch-pretrained-bert) provides '''
'''general-purpose architectures (BERT, GPT, RoBERTa, XLM, DistilBert, XLNet...) for Natural '''
'''Language Understanding (NLU) and Natural Language Generation (NLG) with over thirty-two pretrained '''
'''models in one hundred plus languages and deep interoperability between Jax, PyTorch and TensorFlow.''',
'''BERT is designed to pre-train deep bidirectional representations from unlabeled text by jointly '''
'''conditioning on both left and right context in all layers.''',
'''The quick brown fox jumps over the lazy dog.''',
]
# fmt: off
UpperCAmelCase__ = {
'''input_ids''': [
[4, 32, 13, 7, 9, 12, 19, 8, 13, 18, 5, 13, 12, 4, 64, 19, 8, 13, 18, 5, 13, 15, 22, 4, 28, 9, 8, 20, 9, 4, 7, 12, 4, 24, 22, 6, 8, 13, 17, 11, 39, 6, 13, 7, 9, 12, 19, 8, 13, 18, 5, 13, 12, 4, 7, 9, 14, 4, 24, 22, 6, 8, 13, 17, 11, 39, 24, 13, 5, 6, 13, 7, 10, 9, 5, 14, 39, 25, 5, 13, 6, 63, 4, 24, 13, 8, 27, 10, 14, 5, 12, 4, 21, 5, 9, 5, 13, 7, 15, 39, 24, 16, 13, 24, 8, 12, 5, 4, 7, 13, 17, 11, 10, 6, 5, 17, 6, 16, 13, 5, 12, 4, 64, 40, 47, 54, 32, 23, 4, 53, 49, 32, 23, 4, 54, 8, 40, 47, 54, 32, 7, 23, 4, 69, 52, 43, 23, 4, 51, 10, 12, 6, 10, 15, 40, 5, 13, 6, 23, 4, 69, 52, 48, 5, 6, 26, 26, 26, 63, 4, 19, 8, 13, 4, 48, 7, 6, 16, 13, 7, 15, 4, 52, 7, 9, 21, 16, 7, 21, 5, 4, 61, 9, 14, 5, 13, 12, 6, 7, 9, 14, 10, 9, 21, 4, 64, 48, 52, 61, 63, 4, 7, 9, 14, 4, 48, 7, 6, 16, 13, 7, 15, 4, 52, 7, 9, 21, 16, 7, 21, 5, 4, 53, 5, 9, 5, 13, 7, 6, 10, 8, 9, 4, 64, 48, 52, 53, 63, 4, 20, 10, 6, 11, 4, 8, 27, 5, 13, 4, 6, 11, 10, 13, 6, 22, 39, 6, 20, 8, 4, 24, 13, 5, 6, 13, 7, 10, 9, 5, 14, 4, 18, 8, 14, 5, 15, 12, 4, 10, 9, 4, 8, 9, 5, 4, 11, 16, 9, 14, 13, 5, 14, 4, 24, 15, 16, 12, 4, 15, 7, 9, 21, 16, 7, 21, 5, 12, 4, 7, 9, 14, 4, 14, 5, 5, 24, 4, 10, 9, 6, 5, 13, 8, 24, 5, 13, 7, 25, 10, 15, 10, 6, 22, 4, 25, 5, 6, 20, 5, 5, 9, 4, 58, 7, 37, 23, 4, 49, 22, 32, 8, 13, 17, 11, 4, 7, 9, 14, 4, 32, 5, 9, 12, 8, 13, 55, 15, 8, 20, 26, 2],
[4, 40, 47, 54, 32, 4, 10, 12, 4, 14, 5, 12, 10, 21, 9, 5, 14, 4, 6, 8, 4, 24, 13, 5, 39, 6, 13, 7, 10, 9, 4, 14, 5, 5, 24, 4, 25, 10, 14, 10, 13, 5, 17, 6, 10, 8, 9, 7, 15, 4, 13, 5, 24, 13, 5, 12, 5, 9, 6, 7, 6, 10, 8, 9, 12, 4, 19, 13, 8, 18, 4, 16, 9, 15, 7, 25, 5, 15, 5, 14, 4, 6, 5, 37, 6, 4, 25, 22, 4, 46, 8, 10, 9, 6, 15, 22, 4, 17, 8, 9, 14, 10, 6, 10, 8, 9, 10, 9, 21, 4, 8, 9, 4, 25, 8, 6, 11, 4, 15, 5, 19, 6, 4, 7, 9, 14, 4, 13, 10, 21, 11, 6, 4, 17, 8, 9, 6, 5, 37, 6, 4, 10, 9, 4, 7, 15, 15, 4, 15, 7, 22, 5, 13, 12, 26, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
[4, 32, 11, 5, 4, 45, 16, 10, 17, 28, 4, 25, 13, 8, 20, 9, 4, 19, 8, 37, 4, 46, 16, 18, 24, 12, 4, 8, 27, 5, 13, 4, 6, 11, 5, 4, 15, 7, 57, 22, 4, 14, 8, 21, 26, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
],
'''attention_mask''': [
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
]
}
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=lowerCamelCase__ ,model_name='microsoft/speecht5_asr' ,revision='c5ef64c71905caeccde0e4462ef3f9077224c524' ,sequences=lowerCamelCase__ ,)
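

# Editor's addition: the property the tests above exercise. SpeechT5's tokenizer is a
# character-level SentencePiece model, so each character becomes one token and the
# SPIECE_UNDERLINE marker flags word starts. Running this downloads the tokenizer.
def _speechta_tokenizer_demo():
    from transformers import SpeechT5Tokenizer

    tokenizer = SpeechT5Tokenizer.from_pretrained('microsoft/speecht5_asr')
    print(tokenizer.tokenize('This is a test'))  # one token per character
    ids = tokenizer.encode('This is a test', add_special_tokens=False)
    print(tokenizer.decode(ids))  # 'This is a test'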
| 98
|
import math
from typing import Any, Callable, List, Optional, Tuple, Union
import numpy as np
import torch
from ...models import TaFilmDecoder
from ...schedulers import DDPMScheduler
from ...utils import is_onnx_available, logging, randn_tensor
if is_onnx_available():
from ..onnx_utils import OnnxRuntimeModel
from ..pipeline_utils import AudioPipelineOutput, DiffusionPipeline
from .continous_encoder import SpectrogramContEncoder
from .notes_encoder import SpectrogramNotesEncoder
UpperCAmelCase_ = logging.get_logger(__name__) # pylint: disable=invalid-name
UpperCAmelCase_ = 256
class lowercase__ ( __lowerCamelCase ):
'''simple docstring'''
a : List[str] = ["melgan"]
def __init__( self, __magic_name__, __magic_name__, __magic_name__, __magic_name__, __magic_name__, ) -> None:
"""simple docstring"""
super().__init__()
# From MELGAN
        self.min_value = math.log(1e-5)  # Matches MelGAN training.
        self.max_value = 4.0  # Largest value for most examples
        self.n_dims = 128
self.register_modules(
notes_encoder=__magic_name__, continuous_encoder=__magic_name__, decoder=__magic_name__, scheduler=__magic_name__, melgan=__magic_name__, )
    def scale_features(self, features, output_range=(-1.0, 1.0), clip=False):
        """Linearly scale features to the network's output range."""
        min_out, max_out = output_range
        if clip:
            features = torch.clip(features, self.min_value, self.max_value)
        # Scale to [0, 1].
        zero_one = (features - self.min_value) / (self.max_value - self.min_value)
        # Scale to [min_out, max_out].
        return zero_one * (max_out - min_out) + min_out

    def scale_to_features(self, outputs, input_range=(-1.0, 1.0), clip=False):
        """Invert by linearly scaling network outputs back to the features range."""
        min_out, max_out = input_range
        outputs = torch.clip(outputs, min_out, max_out) if clip else outputs
        # Scale to [0, 1].
        zero_one = (outputs - min_out) / (max_out - min_out)
        # Scale to [self.min_value, self.max_value].
        return zero_one * (self.max_value - self.min_value) + self.min_value

    def encode(self, input_tokens, continuous_inputs, continuous_mask):
        tokens_mask = input_tokens > 0
        tokens_encoded, tokens_mask = self.notes_encoder(
            encoder_input_tokens=input_tokens, encoder_inputs_mask=tokens_mask
        )
        continuous_encoded, continuous_mask = self.continuous_encoder(
            encoder_inputs=continuous_inputs, encoder_inputs_mask=continuous_mask
        )
        return [(tokens_encoded, tokens_mask), (continuous_encoded, continuous_mask)]

    def decode(self, encodings_and_masks, input_tokens, noise_time):
        timesteps = noise_time
        if not torch.is_tensor(timesteps):
            timesteps = torch.tensor([timesteps], dtype=torch.long, device=input_tokens.device)
        elif torch.is_tensor(timesteps) and len(timesteps.shape) == 0:
            timesteps = timesteps[None].to(input_tokens.device)
        # broadcast to batch dimension in a way that's compatible with ONNX/Core ML
        timesteps = timesteps * torch.ones(input_tokens.shape[0], dtype=timesteps.dtype, device=timesteps.device)
        logits = self.decoder(
            encodings_and_masks=encodings_and_masks, decoder_input_tokens=input_tokens, decoder_noise_time=timesteps
        )
        return logits
@torch.no_grad()
    def __call__(self, input_tokens, generator=None, num_inference_steps=100, return_dict=True, output_type="numpy", callback=None, callback_steps=1) -> Union[AudioPipelineOutput, Tuple]:
"""simple docstring"""
if (callback_steps is None) or (
callback_steps is not None and (not isinstance(__magic_name__, __magic_name__ ) or callback_steps <= 0)
):
raise ValueError(
f"`callback_steps` has to be a positive integer but is {callback_steps} of type"
f" {type(__magic_name__ )}." )
        pred_mel = np.zeros([1, TARGET_FEATURE_LENGTH, self.n_dims], dtype=np.float32)
        full_pred_mel = np.zeros([1, 0, self.n_dims], np.float32)
        ones = torch.ones((1, TARGET_FEATURE_LENGTH), dtype=bool, device=self.device)
for i, encoder_input_tokens in enumerate(__magic_name__ ):
if i == 0:
UpperCamelCase__ : str = torch.from_numpy(pred_mel[:1].copy() ).to(
device=self.device, dtype=self.decoder.dtype )
# The first chunk has no previous context.
UpperCamelCase__ : Any = torch.zeros((1, TARGET_FEATURE_LENGTH), dtype=__magic_name__, device=self.device )
else:
# The full song pipeline does not feed in a context feature, so the mask
# will be all 0s after the feature converter. Because we know we're
# feeding in a full context chunk from the previous prediction, set it
# to all 1s.
UpperCamelCase__ : List[str] = ones
UpperCamelCase__ : int = self.scale_features(
__magic_name__, output_range=[-1.0, 1.0], clip=__magic_name__ )
UpperCamelCase__ : Union[str, Any] = self.encode(
input_tokens=torch.IntTensor([encoder_input_tokens] ).to(device=self.device ), continuous_inputs=__magic_name__, continuous_mask=__magic_name__, )
# Sample encoder_continuous_inputs shaped gaussian noise to begin loop
UpperCamelCase__ : Optional[int] = randn_tensor(
shape=encoder_continuous_inputs.shape, generator=__magic_name__, device=self.device, dtype=self.decoder.dtype, )
# set step values
self.scheduler.set_timesteps(__magic_name__ )
# Denoising diffusion loop
for j, t in enumerate(self.progress_bar(self.scheduler.timesteps ) ):
UpperCamelCase__ : Union[str, Any] = self.decode(
encodings_and_masks=__magic_name__, input_tokens=__magic_name__, noise_time=t / self.scheduler.config.num_train_timesteps, )
# Compute previous output: x_t -> x_t-1
UpperCamelCase__ : List[Any] = self.scheduler.step(__magic_name__, __magic_name__, __magic_name__, generator=__magic_name__ ).prev_sample
UpperCamelCase__ : List[Any] = self.scale_to_features(__magic_name__, input_range=[-1.0, 1.0] )
UpperCamelCase__ : List[Any] = mel[:1]
UpperCamelCase__ : int = mel.cpu().float().numpy()
UpperCamelCase__ : Union[str, Any] = np.concatenate([full_pred_mel, pred_mel[:1]], axis=1 )
# call the callback, if provided
if callback is not None and i % callback_steps == 0:
callback(__magic_name__, __magic_name__ )
logger.info('''Generated segment''', __magic_name__ )
if output_type == "numpy" and not is_onnx_available():
raise ValueError(
'''Cannot return output in \'np\' format if ONNX is not available. Make sure to have ONNX installed or set \'output_type\' to \'mel\'.''' )
elif output_type == "numpy" and self.melgan is None:
raise ValueError(
'''Cannot return output in \'np\' format if melgan component is not defined. Make sure to define `self.melgan` or set \'output_type\' to \'mel\'.''' )
if output_type == "numpy":
UpperCamelCase__ : Optional[int] = self.melgan(input_features=full_pred_mel.astype(np.floataa ) )
else:
UpperCamelCase__ : Any = full_pred_mel
if not return_dict:
return (output,)
return AudioPipelineOutput(audios=__magic_name__ )
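# A minimal usage sketch for this pipeline, following the documented
# spectrogram-diffusion example. The checkpoint name and MIDI file are
# placeholders, and MidiProcessor additionally requires the `note_seq` package.
import torch
from diffusers import MidiProcessor, SpectrogramDiffusionPipeline

pipe = SpectrogramDiffusionPipeline.from_pretrained("google/music-spectrogram-diffusion")
pipe = pipe.to("cuda" if torch.cuda.is_available() else "cpu")
processor = MidiProcessor()
# Each MIDI chunk becomes one `encoder_input_tokens` segment in the loop above.
output = pipe(processor("example.mid"), num_inference_steps=100)
audio = output.audios[0]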
| 201
| 0
|
"""simple docstring"""
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowercase__ : str = logging.get_logger(__name__)
lowercase__ : str = {
"""google/pix2struct-textcaps-base""": (
"""https://huggingface.co/google/pix2struct-textcaps-base/resolve/main/config.json"""
),
}
class UpperCamelCase__ ( lowercase_ ):
"""simple docstring"""
_SCREAMING_SNAKE_CASE = """pix2struct_text_model"""
_SCREAMING_SNAKE_CASE = ["""past_key_values"""]
_SCREAMING_SNAKE_CASE = {
"""hidden_size""": """hidden_size""",
"""num_attention_heads""": """num_heads""",
"""num_hidden_layers""": """num_layers""",
}
def __init__( self : str , SCREAMING_SNAKE_CASE_ : Any=5_0_2_4_4 , SCREAMING_SNAKE_CASE_ : Union[str, Any]=7_6_8 , SCREAMING_SNAKE_CASE_ : Tuple=6_4 , SCREAMING_SNAKE_CASE_ : Any=2_0_4_8 , SCREAMING_SNAKE_CASE_ : int=1_2 , SCREAMING_SNAKE_CASE_ : str=1_2 , SCREAMING_SNAKE_CASE_ : str=3_2 , SCREAMING_SNAKE_CASE_ : Tuple=1_2_8 , SCREAMING_SNAKE_CASE_ : Dict=0.1 , SCREAMING_SNAKE_CASE_ : int=1E-6 , SCREAMING_SNAKE_CASE_ : List[Any]=1.0 , SCREAMING_SNAKE_CASE_ : Optional[int]="gelu_new" , SCREAMING_SNAKE_CASE_ : int=0 , SCREAMING_SNAKE_CASE_ : Any=False , SCREAMING_SNAKE_CASE_ : int=0 , SCREAMING_SNAKE_CASE_ : List[Any]=1 , SCREAMING_SNAKE_CASE_ : Tuple=False , SCREAMING_SNAKE_CASE_ : Optional[Any]=True , **SCREAMING_SNAKE_CASE_ : List[Any] , ):
lowerCAmelCase_ : Tuple = vocab_size
lowerCAmelCase_ : int = hidden_size
lowerCAmelCase_ : Optional[int] = d_kv
lowerCAmelCase_ : Optional[int] = d_ff
lowerCAmelCase_ : List[Any] = num_layers
lowerCAmelCase_ : Optional[Any] = num_heads
lowerCAmelCase_ : Optional[int] = relative_attention_num_buckets
lowerCAmelCase_ : str = relative_attention_max_distance
lowerCAmelCase_ : Tuple = dropout_rate
lowerCAmelCase_ : int = layer_norm_epsilon
lowerCAmelCase_ : Dict = initializer_factor
lowerCAmelCase_ : int = use_cache
lowerCAmelCase_ : Optional[int] = eos_token_id
lowerCAmelCase_ : Any = decoder_start_token_id
# for backwards compatibility
lowerCAmelCase_ : int = dense_act_fn
super().__init__(
pad_token_id=SCREAMING_SNAKE_CASE_ , eos_token_id=SCREAMING_SNAKE_CASE_ , decoder_start_token_id=SCREAMING_SNAKE_CASE_ , tie_word_embeddings=SCREAMING_SNAKE_CASE_ , is_decoder=SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ , )
@classmethod
def SCREAMING_SNAKE_CASE__ ( cls : Dict , SCREAMING_SNAKE_CASE_ : Union[str, os.PathLike] , **SCREAMING_SNAKE_CASE_ : int ):
cls._set_token_in_kwargs(SCREAMING_SNAKE_CASE_ )
lowerCAmelCase_ ,lowerCAmelCase_ : int = cls.get_config_dict(SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ )
# get the text config dict if we are loading from Pix2StructConfig
if config_dict.get('model_type' ) == "pix2struct":
lowerCAmelCase_ : List[str] = config_dict['text_config']
if "model_type" in config_dict and hasattr(cls , 'model_type' ) and config_dict["model_type"] != cls.model_type:
logger.warning(
F"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
F"{cls.model_type}. This is not supported for all configurations of models and can yield errors." )
return cls.from_dict(SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ )
class UpperCamelCase__ ( lowercase_ ):
"""simple docstring"""
_SCREAMING_SNAKE_CASE = """pix2struct_vision_model"""
def __init__( self : Any , SCREAMING_SNAKE_CASE_ : Optional[Any]=7_6_8 , SCREAMING_SNAKE_CASE_ : Union[str, Any]=7_6_8 , SCREAMING_SNAKE_CASE_ : Any=2_0_4_8 , SCREAMING_SNAKE_CASE_ : Optional[int]=6_4 , SCREAMING_SNAKE_CASE_ : Any=1_2 , SCREAMING_SNAKE_CASE_ : Any=1_2 , SCREAMING_SNAKE_CASE_ : List[Any]="gelu_new" , SCREAMING_SNAKE_CASE_ : Any=1E-6 , SCREAMING_SNAKE_CASE_ : Union[str, Any]=0.0 , SCREAMING_SNAKE_CASE_ : int=0.0 , SCREAMING_SNAKE_CASE_ : List[str]=1E-10 , SCREAMING_SNAKE_CASE_ : List[str]=1.0 , SCREAMING_SNAKE_CASE_ : Optional[int]=4_0_9_6 , SCREAMING_SNAKE_CASE_ : str=3_2 , SCREAMING_SNAKE_CASE_ : Dict=1_2_8 , **SCREAMING_SNAKE_CASE_ : Dict , ):
super().__init__(**SCREAMING_SNAKE_CASE_ )
lowerCAmelCase_ : Any = hidden_size
lowerCAmelCase_ : str = patch_embed_hidden_size
lowerCAmelCase_ : Optional[int] = d_ff
lowerCAmelCase_ : str = dropout_rate
lowerCAmelCase_ : List[Any] = num_hidden_layers
lowerCAmelCase_ : List[Any] = num_attention_heads
lowerCAmelCase_ : List[Any] = initializer_range
lowerCAmelCase_ : Any = initializer_factor
lowerCAmelCase_ : Any = attention_dropout
lowerCAmelCase_ : Any = layer_norm_eps
lowerCAmelCase_ : Any = dense_act_fn
lowerCAmelCase_ : int = seq_len
lowerCAmelCase_ : List[Any] = relative_attention_num_buckets
lowerCAmelCase_ : Tuple = relative_attention_max_distance
lowerCAmelCase_ : Any = d_kv
@classmethod
def SCREAMING_SNAKE_CASE__ ( cls : str , SCREAMING_SNAKE_CASE_ : Union[str, os.PathLike] , **SCREAMING_SNAKE_CASE_ : str ):
cls._set_token_in_kwargs(SCREAMING_SNAKE_CASE_ )
lowerCAmelCase_ ,lowerCAmelCase_ : Dict = cls.get_config_dict(SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ )
# get the vision config dict if we are loading from Pix2StructConfig
if config_dict.get('model_type' ) == "pix2struct":
lowerCAmelCase_ : str = config_dict['vision_config']
if "model_type" in config_dict and hasattr(cls , 'model_type' ) and config_dict["model_type"] != cls.model_type:
logger.warning(
F"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
F"{cls.model_type}. This is not supported for all configurations of models and can yield errors." )
return cls.from_dict(SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ )
class UpperCamelCase__ ( lowercase_ ):
"""simple docstring"""
_SCREAMING_SNAKE_CASE = """pix2struct"""
_SCREAMING_SNAKE_CASE = True
def __init__( self : Any , SCREAMING_SNAKE_CASE_ : Dict=None , SCREAMING_SNAKE_CASE_ : Optional[Any]=None , SCREAMING_SNAKE_CASE_ : Tuple=1.0 , SCREAMING_SNAKE_CASE_ : Union[str, Any]=0.02 , SCREAMING_SNAKE_CASE_ : Tuple=False , SCREAMING_SNAKE_CASE_ : str=False , SCREAMING_SNAKE_CASE_ : str=True , **SCREAMING_SNAKE_CASE_ : int , ):
super().__init__(tie_word_embeddings=SCREAMING_SNAKE_CASE_ , is_encoder_decoder=SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ )
if text_config is None:
lowerCAmelCase_ : Tuple = {}
logger.info('text_config is None. Initializing the Pix2StructTextConfig with default values.' )
if vision_config is None:
lowerCAmelCase_ : str = {}
logger.info('vision_config is None. Initializing the Pix2StructVisionConfig with default values.' )
lowerCAmelCase_ : Optional[int] = PixaStructTextConfig(**SCREAMING_SNAKE_CASE_ )
lowerCAmelCase_ : int = PixaStructVisionConfig(**SCREAMING_SNAKE_CASE_ )
lowerCAmelCase_ : List[Any] = self.text_config.decoder_start_token_id
lowerCAmelCase_ : str = self.text_config.pad_token_id
lowerCAmelCase_ : int = self.text_config.eos_token_id
lowerCAmelCase_ : int = initializer_factor
lowerCAmelCase_ : Tuple = initializer_range
lowerCAmelCase_ : List[str] = self.initializer_range
lowerCAmelCase_ : Optional[Any] = self.initializer_range
lowerCAmelCase_ : Optional[Any] = is_vqa
@classmethod
def SCREAMING_SNAKE_CASE__ ( cls : Dict , SCREAMING_SNAKE_CASE_ : PixaStructTextConfig , SCREAMING_SNAKE_CASE_ : PixaStructVisionConfig , **SCREAMING_SNAKE_CASE_ : Union[str, Any] ):
return cls(text_config=text_config.to_dict() , vision_config=vision_config.to_dict() , **SCREAMING_SNAKE_CASE_ )
def SCREAMING_SNAKE_CASE__ ( self : List[Any] ):
lowerCAmelCase_ : str = copy.deepcopy(self.__dict__ )
lowerCAmelCase_ : Any = self.text_config.to_dict()
lowerCAmelCase_ : Tuple = self.vision_config.to_dict()
lowerCAmelCase_ : Tuple = self.__class__.model_type
return output
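# A short sketch of composing the three configs above, mirroring the standard
# text/vision config pattern in transformers; the field values are illustrative.
from transformers import Pix2StructConfig, Pix2StructTextConfig, Pix2StructVisionConfig

text_config = Pix2StructTextConfig(hidden_size=768, num_layers=12)
vision_config = Pix2StructVisionConfig(hidden_size=768, num_hidden_layers=12)
config = Pix2StructConfig.from_text_vision_configs(text_config, vision_config)
assert config.text_config.hidden_size == 768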
| 289
|
"""simple docstring"""
def print_max_activities(start: list[int], finish: list[int]) -> None:
    """simple docstring"""
    n = len(finish)
    print('The following activities are selected:')
    # The first activity is always selected
    i = 0
    print(i, end=',')
    # Consider rest of the activities
    for j in range(n):
        # If this activity has start time greater than
        # or equal to the finish time of previously
        # selected activity, then select it
        if start[j] >= finish[i]:
            print(j, end=',')
            i = j
if __name__ == "__main__":
import doctest
doctest.testmod()
    start = [1, 3, 0, 5, 8, 5]
    finish = [2, 4, 6, 7, 9, 9]
print_max_activities(start, finish)
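    # Tracing the greedy rule through the sample data (a worked example; the
    # algorithm assumes activities are already sorted by finish time):
    #   j : start[j] finish[j]   start[j] >= finish[i] ?   result
    #   0 :    1        2        always selected           i = 0
    #   1 :    3        4        3 >= 2 -> select          i = 1
    #   2 :    0        6        0 >= 4 -> skip
    #   3 :    5        7        5 >= 4 -> select          i = 3
    #   4 :    8        9        8 >= 7 -> select          i = 4
    #   5 :    5        9        5 >= 9 -> skip
    # so the call above prints: 0,1,3,4,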
| 289
| 1
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
is_vision_available,
)
UpperCamelCase__ = {"""configuration_vit""": ["""VIT_PRETRAINED_CONFIG_ARCHIVE_MAP""", """ViTConfig""", """ViTOnnxConfig"""]}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase__ = ["""ViTFeatureExtractor"""]
UpperCamelCase__ = ["""ViTImageProcessor"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase__ = [
"""VIT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""ViTForImageClassification""",
"""ViTForMaskedImageModeling""",
"""ViTModel""",
"""ViTPreTrainedModel""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase__ = [
"""TFViTForImageClassification""",
"""TFViTModel""",
"""TFViTPreTrainedModel""",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase__ = [
"""FlaxViTForImageClassification""",
"""FlaxViTModel""",
"""FlaxViTPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_vit import VIT_PRETRAINED_CONFIG_ARCHIVE_MAP, ViTConfig, ViTOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_vit import ViTFeatureExtractor
from .image_processing_vit import ViTImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vit import (
VIT_PRETRAINED_MODEL_ARCHIVE_LIST,
ViTForImageClassification,
ViTForMaskedImageModeling,
ViTModel,
ViTPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_vit import TFViTForImageClassification, TFViTModel, TFViTPreTrainedModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_vit import FlaxViTForImageClassification, FlaxViTModel, FlaxViTPreTrainedModel
else:
import sys
UpperCamelCase__ = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
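# A sketch of what the _LazyModule indirection buys: submodules are imported
# only on first attribute access, so importing the package stays cheap. The
# module path below assumes this file lives at transformers/models/vit/__init__.py.
import transformers.models.vit as vit

config = vit.ViTConfig()   # imports configuration_vit on first access
model_cls = vit.ViTModel   # imports modeling_vit (requires torch) lazily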
| 92
|
import argparse
import re
import torch
from CLAP import create_model
from transformers import AutoFeatureExtractor, ClapConfig, ClapModel
lowerCAmelCase : Union[str, Any] = {
"""text_branch""": """text_model""",
"""audio_branch""": """audio_model.audio_encoder""",
"""attn""": """attention.self""",
"""self.proj""": """output.dense""",
"""attention.self_mask""": """attn_mask""",
"""mlp.fc1""": """intermediate.dense""",
"""mlp.fc2""": """output.dense""",
"""norm1""": """layernorm_before""",
"""norm2""": """layernorm_after""",
"""bn0""": """batch_norm""",
}
lowerCAmelCase : int = AutoFeatureExtractor.from_pretrained("""laion/clap-htsat-unfused""", truncation="""rand_trunc""")
def A_ ( _UpperCAmelCase , _UpperCAmelCase=False ):
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_: List[Any] = create_model(
"HTSAT-tiny" , "roberta" , _UpperCAmelCase , precision="fp32" , device="cuda:0" if torch.cuda.is_available() else "cpu" , enable_fusion=_UpperCAmelCase , fusion_type="aff_2d" if enable_fusion else None , )
return model, model_cfg
def A_ ( _UpperCAmelCase ):
SCREAMING_SNAKE_CASE_: Any = {}
SCREAMING_SNAKE_CASE_: Tuple = R".*sequential.(\d+).*"
SCREAMING_SNAKE_CASE_: Dict = R".*_projection.(\d+).*"
for key, value in state_dict.items():
# check if any key needs to be modified
for key_to_modify, new_key in KEYS_TO_MODIFY_MAPPING.items():
if key_to_modify in key:
SCREAMING_SNAKE_CASE_: Any = key.replace(_UpperCAmelCase , _UpperCAmelCase )
if re.match(_UpperCAmelCase , _UpperCAmelCase ):
# replace sequential layers with list
SCREAMING_SNAKE_CASE_: Optional[int] = re.match(_UpperCAmelCase , _UpperCAmelCase ).group(1 )
SCREAMING_SNAKE_CASE_: Dict = key.replace(f"sequential.{sequential_layer}." , f"layers.{int(_UpperCAmelCase )//3}.linear." )
elif re.match(_UpperCAmelCase , _UpperCAmelCase ):
SCREAMING_SNAKE_CASE_: Any = int(re.match(_UpperCAmelCase , _UpperCAmelCase ).group(1 ) )
# Because in CLAP they use `nn.Sequential`...
SCREAMING_SNAKE_CASE_: Optional[int] = 1 if projecton_layer == 0 else 2
SCREAMING_SNAKE_CASE_: Dict = key.replace(f"_projection.{projecton_layer}." , f"_projection.linear{transformers_projection_layer}." )
if "audio" and "qkv" in key:
# split qkv into query key and value
SCREAMING_SNAKE_CASE_: Tuple = value
SCREAMING_SNAKE_CASE_: List[str] = mixed_qkv.size(0 ) // 3
SCREAMING_SNAKE_CASE_: Any = mixed_qkv[:qkv_dim]
SCREAMING_SNAKE_CASE_: Optional[int] = mixed_qkv[qkv_dim : qkv_dim * 2]
SCREAMING_SNAKE_CASE_: Optional[Any] = mixed_qkv[qkv_dim * 2 :]
SCREAMING_SNAKE_CASE_: str = query_layer
SCREAMING_SNAKE_CASE_: int = key_layer
SCREAMING_SNAKE_CASE_: List[Any] = value_layer
else:
SCREAMING_SNAKE_CASE_: int = value
return model_state_dict
def A_ ( _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase=False ):
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_: Union[str, Any] = init_clap(_UpperCAmelCase , enable_fusion=_UpperCAmelCase )
clap_model.eval()
SCREAMING_SNAKE_CASE_: Union[str, Any] = clap_model.state_dict()
SCREAMING_SNAKE_CASE_: Optional[int] = rename_state_dict(_UpperCAmelCase )
SCREAMING_SNAKE_CASE_: List[str] = ClapConfig()
SCREAMING_SNAKE_CASE_: Tuple = enable_fusion
SCREAMING_SNAKE_CASE_: Tuple = ClapModel(_UpperCAmelCase )
# ignore the spectrogram embedding layer
model.load_state_dict(_UpperCAmelCase , strict=_UpperCAmelCase )
model.save_pretrained(_UpperCAmelCase )
transformers_config.save_pretrained(_UpperCAmelCase )
if __name__ == "__main__":
lowerCAmelCase : Tuple = argparse.ArgumentParser()
parser.add_argument("""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""")
parser.add_argument("""--checkpoint_path""", default=None, type=str, help="""Path to fairseq checkpoint""")
parser.add_argument("""--config_path""", default=None, type=str, help="""Path to hf config.json of model to convert""")
parser.add_argument("""--enable_fusion""", action="""store_true""", help="""Whether to enable fusion or not""")
lowerCAmelCase : int = parser.parse_args()
convert_clap_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.enable_fusion)
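    # After running the conversion above, the dump should load like any other
    # transformers checkpoint (the path is a placeholder):
    from transformers import ClapModel

    model = ClapModel.from_pretrained("/path/to/pytorch_dump_folder")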
| 13
| 0
|
import sys
from .dependency_versions_table import deps
from .utils.versions import require_version, require_version_core
# define which module versions we always want to check at run time
# (usually the ones defined in `install_requires` in setup.py)
#
# order specific notes:
# - tqdm must be checked before tokenizers
pkgs_to_check_at_runtime = 'python tqdm regex requests packaging filelock numpy tokenizers'.split()
if sys.version_info < (3, 7):
pkgs_to_check_at_runtime.append('''dataclasses''')
if sys.version_info < (3, 8):
pkgs_to_check_at_runtime.append('''importlib_metadata''')
for pkg in pkgs_to_check_at_runtime:
if pkg in deps:
if pkg == "tokenizers":
# must be loaded here, or else tqdm check may fail
from .utils import is_tokenizers_available
if not is_tokenizers_available():
continue # not required, check version only if installed
require_version_core(deps[pkg])
else:
raise ValueError(f"""can't find {pkg} in {deps.keys()}, check dependency_versions_table.py""")
def dep_version_check(pkg, hint=None):
    """simple docstring"""
    require_version(deps[pkg], hint)
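# A minimal sketch of how these helpers are used elsewhere; the requirement
# strings and the hint are illustrative.
from transformers.utils.versions import require_version

require_version("numpy>=1.17")  # raises if the pin is not satisfied
require_version("tokenizers>=0.11.1", "pip install -U tokenizers")  # with a hint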
| 369
|
def match_pattern(input_string: str, pattern: str) -> bool:
    """simple docstring"""
    len_string = len(input_string) + 1
    len_pattern = len(pattern) + 1
    # dp is a 2d matrix where dp[i][j] denotes whether prefix string of
    # length i of input_string matches with prefix string of length j of
    # given pattern.
    # "dp" stands for dynamic programming.
    dp = [[0 for _ in range(len_pattern)] for _ in range(len_string)]
    # since string of zero length match pattern of zero length
    dp[0][0] = 1
    # since pattern of zero length will never match with string of non-zero length
    for i in range(1, len_string):
        dp[i][0] = 0
    # since string of zero length will match with pattern where there
    # is at least one * alternatively
    for j in range(1, len_pattern):
        dp[0][j] = dp[0][j - 2] if pattern[j - 1] == '*' else 0
    # now using bottom-up approach to find for all remaining lengths
    for i in range(1, len_string):
        for j in range(1, len_pattern):
            if input_string[i - 1] == pattern[j - 1] or pattern[j - 1] == '.':
                dp[i][j] = dp[i - 1][j - 1]
            elif pattern[j - 1] == '*':
                if dp[i][j - 2] == 1:
                    dp[i][j] = 1
                elif pattern[j - 2] in (input_string[i - 1], '.'):
                    dp[i][j] = dp[i - 1][j]
                else:
                    dp[i][j] = 0
            else:
                dp[i][j] = 0
    return bool(dp[-1][-1])
if __name__ == "__main__":
import doctest
doctest.testmod()
# inputing the strings
# input_string = input("input a string :")
# pattern = input("input a pattern :")
    input_string = 'aab'
    pattern = 'c*a*b'
# using function to check whether given string matches the given pattern
if match_pattern(input_string, pattern):
print(f"""{input_string} matches the given pattern {pattern}""")
else:
print(f"""{input_string} does not match with the given pattern {pattern}""")
| 44
| 0
|
import argparse
import datetime
import json
import time
import warnings
from logging import getLogger
from pathlib import Path
from typing import Dict, List
import torch
from tqdm import tqdm
from transformers import AutoModelForSeqaSeqLM, AutoTokenizer
from utils import calculate_bleu, calculate_rouge, chunks, parse_numeric_n_bool_cl_kwargs, use_task_specific_params
UpperCAmelCase : List[str] = getLogger(__name__)
UpperCAmelCase : List[str] = "cuda" if torch.cuda.is_available() else "cpu"
def __lowerCamelCase ( lowerCamelCase__ : List[str] , lowerCamelCase__ : str , lowerCamelCase__ : str , lowerCamelCase__ : int = 8 , lowerCamelCase__ : str = DEFAULT_DEVICE , lowerCamelCase__ : int=False , lowerCamelCase__ : List[str]="summarization" , lowerCamelCase__ : List[str]=None , **lowerCamelCase__ : str , ):
'''simple docstring'''
lowerCamelCase = Path(_snake_case ).open("""w""" , encoding="""utf-8""" )
lowerCamelCase = str(_snake_case )
lowerCamelCase = AutoModelForSeqaSeqLM.from_pretrained(_snake_case ).to(_snake_case )
if fpaa:
lowerCamelCase = model.half()
lowerCamelCase = AutoTokenizer.from_pretrained(_snake_case )
logger.info(f'Inferred tokenizer type: {tokenizer.__class__}' ) # if this is wrong, check config.model_type.
lowerCamelCase = time.time()
# update config with task specific params
use_task_specific_params(_snake_case , _snake_case )
if prefix is None:
lowerCamelCase = prefix or getattr(model.config , """prefix""" , """""" ) or """"""
for examples_chunk in tqdm(list(chunks(_snake_case , _snake_case ) ) ):
lowerCamelCase = [prefix + text for text in examples_chunk]
lowerCamelCase = tokenizer(_snake_case , return_tensors="""pt""" , truncation=_snake_case , padding="""longest""" ).to(_snake_case )
lowerCamelCase = model.generate(
input_ids=batch.input_ids , attention_mask=batch.attention_mask , **_snake_case , )
lowerCamelCase = tokenizer.batch_decode(_snake_case , skip_special_tokens=_snake_case , clean_up_tokenization_spaces=_snake_case )
for hypothesis in dec:
fout.write(hypothesis + """\n""" )
fout.flush()
fout.close()
lowerCamelCase = int(time.time() - start_time ) # seconds
lowerCamelCase = len(_snake_case )
return {"n_obs": n_obs, "runtime": runtime, "seconds_per_sample": round(runtime / n_obs , 4 )}
def __lowerCamelCase ( ):
'''simple docstring'''
return datetime.datetime.now().strftime("""%Y-%m-%d %H:%M:%S""" )
def __lowerCamelCase ( lowerCamelCase__ : List[Any]=True ):
'''simple docstring'''
lowerCamelCase = argparse.ArgumentParser()
parser.add_argument("""model_name""" , type=_snake_case , help="""like facebook/bart-large-cnn,t5-base, etc.""" )
parser.add_argument("""input_path""" , type=_snake_case , help="""like cnn_dm/test.source""" )
parser.add_argument("""save_path""" , type=_snake_case , help="""where to save summaries""" )
parser.add_argument("""--reference_path""" , type=_snake_case , required=_snake_case , help="""like cnn_dm/test.target""" )
parser.add_argument("""--score_path""" , type=_snake_case , required=_snake_case , default="""metrics.json""" , help="""where to save metrics""" )
parser.add_argument("""--device""" , type=_snake_case , required=_snake_case , default=_snake_case , help="""cuda, cuda:1, cpu etc.""" )
parser.add_argument(
"""--prefix""" , type=_snake_case , required=_snake_case , default=_snake_case , help="""will be added to the begininng of src examples""" )
parser.add_argument("""--task""" , type=_snake_case , default="""summarization""" , help="""used for task_specific_params + metrics""" )
parser.add_argument("""--bs""" , type=_snake_case , default=8 , required=_snake_case , help="""batch size""" )
parser.add_argument(
"""--n_obs""" , type=_snake_case , default=-1 , required=_snake_case , help="""How many observations. Defaults to all.""" )
parser.add_argument("""--fp16""" , action="""store_true""" )
parser.add_argument("""--dump-args""" , action="""store_true""" , help="""print the custom hparams with the results""" )
parser.add_argument(
"""--info""" , nargs="""?""" , type=_snake_case , const=datetime_now() , help=(
"""use in conjunction w/ --dump-args to print with the results whatever other info you\'d like, e.g."""
""" lang=en-ru. If no value is passed, the current datetime string will be used."""
) , )
# Unspecified args like --num_beams=2 --decoder_start_token_id=4 are passed to model.generate
lowerCamelCase , lowerCamelCase = parser.parse_known_args()
lowerCamelCase = parse_numeric_n_bool_cl_kwargs(_snake_case )
if parsed_args and verbose:
print(f'parsed the following generate kwargs: {parsed_args}' )
lowerCamelCase = [""" """ + x.rstrip() if """t5""" in args.model_name else x.rstrip() for x in open(args.input_path ).readlines()]
if args.n_obs > 0:
lowerCamelCase = examples[: args.n_obs]
Path(args.save_path ).parent.mkdir(exist_ok=_snake_case )
if args.reference_path is None and Path(args.score_path ).exists():
warnings.warn(f'score_path {args.score_path} will be overwritten unless you type ctrl-c.' )
if args.device == "cpu" and args.fpaa:
# this mix leads to RuntimeError: "threshold_cpu" not implemented for 'Half'
raise ValueError("""Can\'t mix --fp16 and --device cpu""" )
lowerCamelCase = generate_summaries_or_translations(
        _snake_case , args.save_path , args.model_name , batch_size=args.bs , device=args.device , fpaa=args.fp16 , task=args.task , prefix=args.prefix , **_snake_case , )
if args.reference_path is None:
return {}
# Compute scores
lowerCamelCase = calculate_bleu if """translation""" in args.task else calculate_rouge
lowerCamelCase = [x.rstrip() for x in open(args.save_path ).readlines()]
lowerCamelCase = [x.rstrip() for x in open(args.reference_path ).readlines()][: len(_snake_case )]
lowerCamelCase = score_fn(_snake_case , _snake_case )
scores.update(_snake_case )
if args.dump_args:
scores.update(_snake_case )
if args.info:
lowerCamelCase = args.info
if verbose:
print(_snake_case )
if args.score_path is not None:
json.dump(_snake_case , open(args.score_path , """w""" ) )
return scores
if __name__ == "__main__":
# Usage for MT:
# python run_eval.py MODEL_NAME $DATA_DIR/test.source $save_dir/test_translations.txt --reference_path $DATA_DIR/test.target --score_path $save_dir/test_bleu.json --task translation $@
run_generate(verbose=True)
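    # Example invocation for summarization (a sketch; the data paths are placeholders):
    # python run_eval.py facebook/bart-large-cnn cnn_dm/test.source preds.txt \
    #     --reference_path cnn_dm/test.target --score_path rouge.json --bs 16 --fp16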
| 252
|
"""simple docstring"""
import flax.linen as nn
import jax.numpy as jnp
from .attention_flax import FlaxTransformeraDModel
from .resnet_flax import FlaxDownsampleaD, FlaxResnetBlockaD, FlaxUpsampleaD
class lowercase_ ( nn.Module ):
'''simple docstring'''
UpperCAmelCase : int
UpperCAmelCase : int
UpperCAmelCase : float = 0.0
UpperCAmelCase : int = 1
UpperCAmelCase : int = 1
UpperCAmelCase : bool = True
UpperCAmelCase : bool = False
UpperCAmelCase : bool = False
UpperCAmelCase : bool = False
UpperCAmelCase : jnp.dtype = jnp.floataa
def lowerCAmelCase_ ( self : List[str] ):
_A = []
_A = []
for i in range(self.num_layers ):
_A = self.in_channels if i == 0 else self.out_channels
_A = FlaxResnetBlockaD(
in_channels=_UpperCAmelCase , out_channels=self.out_channels , dropout_prob=self.dropout , dtype=self.dtype , )
resnets.append(_UpperCAmelCase )
_A = FlaxTransformeraDModel(
in_channels=self.out_channels , n_heads=self.num_attention_heads , d_head=self.out_channels // self.num_attention_heads , depth=1 , use_linear_projection=self.use_linear_projection , only_cross_attention=self.only_cross_attention , use_memory_efficient_attention=self.use_memory_efficient_attention , dtype=self.dtype , )
attentions.append(_UpperCAmelCase )
_A = resnets
_A = attentions
if self.add_downsample:
_A = FlaxDownsampleaD(self.out_channels , dtype=self.dtype )
def __call__( self : Dict , _UpperCAmelCase : int , _UpperCAmelCase : str , _UpperCAmelCase : List[str] , _UpperCAmelCase : Tuple=True ):
_A = ()
for resnet, attn in zip(self.resnets , self.attentions ):
_A = resnet(_UpperCAmelCase , _UpperCAmelCase , deterministic=_UpperCAmelCase )
_A = attn(_UpperCAmelCase , _UpperCAmelCase , deterministic=_UpperCAmelCase )
output_states += (hidden_states,)
if self.add_downsample:
_A = self.downsamplers_a(_UpperCAmelCase )
output_states += (hidden_states,)
return hidden_states, output_states
class lowercase_ ( nn.Module ):
'''simple docstring'''
UpperCAmelCase : int
UpperCAmelCase : int
UpperCAmelCase : float = 0.0
UpperCAmelCase : int = 1
UpperCAmelCase : bool = True
UpperCAmelCase : jnp.dtype = jnp.floataa
def lowerCAmelCase_ ( self : List[Any] ):
_A = []
for i in range(self.num_layers ):
_A = self.in_channels if i == 0 else self.out_channels
_A = FlaxResnetBlockaD(
in_channels=_UpperCAmelCase , out_channels=self.out_channels , dropout_prob=self.dropout , dtype=self.dtype , )
resnets.append(_UpperCAmelCase )
_A = resnets
if self.add_downsample:
_A = FlaxDownsampleaD(self.out_channels , dtype=self.dtype )
def __call__( self : List[str] , _UpperCAmelCase : Tuple , _UpperCAmelCase : int , _UpperCAmelCase : List[str]=True ):
_A = ()
for resnet in self.resnets:
_A = resnet(_UpperCAmelCase , _UpperCAmelCase , deterministic=_UpperCAmelCase )
output_states += (hidden_states,)
if self.add_downsample:
_A = self.downsamplers_a(_UpperCAmelCase )
output_states += (hidden_states,)
return hidden_states, output_states
class lowercase_ ( nn.Module ):
'''simple docstring'''
UpperCAmelCase : int
UpperCAmelCase : int
UpperCAmelCase : int
UpperCAmelCase : float = 0.0
UpperCAmelCase : int = 1
UpperCAmelCase : int = 1
UpperCAmelCase : bool = True
UpperCAmelCase : bool = False
UpperCAmelCase : bool = False
UpperCAmelCase : bool = False
UpperCAmelCase : jnp.dtype = jnp.floataa
def lowerCAmelCase_ ( self : Any ):
_A = []
_A = []
for i in range(self.num_layers ):
_A = self.in_channels if (i == self.num_layers - 1) else self.out_channels
_A = self.prev_output_channel if i == 0 else self.out_channels
_A = FlaxResnetBlockaD(
in_channels=resnet_in_channels + res_skip_channels , out_channels=self.out_channels , dropout_prob=self.dropout , dtype=self.dtype , )
resnets.append(_UpperCAmelCase )
_A = FlaxTransformeraDModel(
in_channels=self.out_channels , n_heads=self.num_attention_heads , d_head=self.out_channels // self.num_attention_heads , depth=1 , use_linear_projection=self.use_linear_projection , only_cross_attention=self.only_cross_attention , use_memory_efficient_attention=self.use_memory_efficient_attention , dtype=self.dtype , )
attentions.append(_UpperCAmelCase )
_A = resnets
_A = attentions
if self.add_upsample:
_A = FlaxUpsampleaD(self.out_channels , dtype=self.dtype )
def __call__( self : Optional[Any] , _UpperCAmelCase : Optional[Any] , _UpperCAmelCase : Optional[int] , _UpperCAmelCase : int , _UpperCAmelCase : Optional[int] , _UpperCAmelCase : Optional[Any]=True ):
for resnet, attn in zip(self.resnets , self.attentions ):
# pop res hidden states
_A = res_hidden_states_tuple[-1]
_A = res_hidden_states_tuple[:-1]
_A = jnp.concatenate((hidden_states, res_hidden_states) , axis=-1 )
_A = resnet(_UpperCAmelCase , _UpperCAmelCase , deterministic=_UpperCAmelCase )
_A = attn(_UpperCAmelCase , _UpperCAmelCase , deterministic=_UpperCAmelCase )
if self.add_upsample:
_A = self.upsamplers_a(_UpperCAmelCase )
return hidden_states
class lowercase_ ( nn.Module ):
'''simple docstring'''
UpperCAmelCase : int
UpperCAmelCase : int
UpperCAmelCase : int
UpperCAmelCase : float = 0.0
UpperCAmelCase : int = 1
UpperCAmelCase : bool = True
UpperCAmelCase : jnp.dtype = jnp.floataa
def lowerCAmelCase_ ( self : Any ):
_A = []
for i in range(self.num_layers ):
_A = self.in_channels if (i == self.num_layers - 1) else self.out_channels
_A = self.prev_output_channel if i == 0 else self.out_channels
_A = FlaxResnetBlockaD(
in_channels=resnet_in_channels + res_skip_channels , out_channels=self.out_channels , dropout_prob=self.dropout , dtype=self.dtype , )
resnets.append(_UpperCAmelCase )
_A = resnets
if self.add_upsample:
_A = FlaxUpsampleaD(self.out_channels , dtype=self.dtype )
def __call__( self : int , _UpperCAmelCase : int , _UpperCAmelCase : Dict , _UpperCAmelCase : Optional[int] , _UpperCAmelCase : Optional[int]=True ):
for resnet in self.resnets:
# pop res hidden states
_A = res_hidden_states_tuple[-1]
_A = res_hidden_states_tuple[:-1]
_A = jnp.concatenate((hidden_states, res_hidden_states) , axis=-1 )
_A = resnet(_UpperCAmelCase , _UpperCAmelCase , deterministic=_UpperCAmelCase )
if self.add_upsample:
_A = self.upsamplers_a(_UpperCAmelCase )
return hidden_states
class lowercase_ ( nn.Module ):
'''simple docstring'''
UpperCAmelCase : int
UpperCAmelCase : float = 0.0
UpperCAmelCase : int = 1
UpperCAmelCase : int = 1
UpperCAmelCase : bool = False
UpperCAmelCase : bool = False
UpperCAmelCase : jnp.dtype = jnp.floataa
def lowerCAmelCase_ ( self : Dict ):
# there is always at least one resnet
_A = [
FlaxResnetBlockaD(
in_channels=self.in_channels , out_channels=self.in_channels , dropout_prob=self.dropout , dtype=self.dtype , )
]
_A = []
for _ in range(self.num_layers ):
_A = FlaxTransformeraDModel(
in_channels=self.in_channels , n_heads=self.num_attention_heads , d_head=self.in_channels // self.num_attention_heads , depth=1 , use_linear_projection=self.use_linear_projection , use_memory_efficient_attention=self.use_memory_efficient_attention , dtype=self.dtype , )
attentions.append(_UpperCAmelCase )
_A = FlaxResnetBlockaD(
in_channels=self.in_channels , out_channels=self.in_channels , dropout_prob=self.dropout , dtype=self.dtype , )
resnets.append(_UpperCAmelCase )
_A = resnets
_A = attentions
def __call__( self : str , _UpperCAmelCase : Union[str, Any] , _UpperCAmelCase : Optional[Any] , _UpperCAmelCase : str , _UpperCAmelCase : Optional[int]=True ):
_A = self.resnets[0](_UpperCAmelCase , _UpperCAmelCase )
for attn, resnet in zip(self.attentions , self.resnets[1:] ):
_A = attn(_UpperCAmelCase , _UpperCAmelCase , deterministic=_UpperCAmelCase )
_A = resnet(_UpperCAmelCase , _UpperCAmelCase , deterministic=_UpperCAmelCase )
return hidden_states
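# A minimal sketch of driving one of these blocks. Since the class names above
# are obfuscated, this uses diffusers' canonical FlaxDownBlock2D and assumes its
# usual channels-last call convention; shapes are illustrative.
import jax
import jax.numpy as jnp
from diffusers.models.unet_2d_blocks_flax import FlaxDownBlock2D

block = FlaxDownBlock2D(in_channels=32, out_channels=64, num_layers=1)
hidden_states = jnp.zeros((1, 16, 16, 32))  # (batch, height, width, channels)
temb = jnp.zeros((1, 128))                  # timestep embedding
params = block.init(jax.random.PRNGKey(0), hidden_states, temb)
hidden_states, skip_states = block.apply(params, hidden_states, temb)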
| 315
| 0
|
import re
from typing import Callable, List, Optional, Union
import tensorflow as tf
try:
from tensorflow.keras.optimizers.legacy import Adam
except ImportError:
from tensorflow.keras.optimizers import Adam
class WarmUp(tf.keras.optimizers.schedules.LearningRateSchedule):
    def __init__(self, initial_learning_rate: float, decay_schedule_fn: Callable, warmup_steps: int, power: float = 1.0, name: str = None, ):
        super().__init__()
        self.initial_learning_rate = initial_learning_rate
        self.warmup_steps = warmup_steps
        self.power = power
        self.decay_schedule_fn = decay_schedule_fn
        self.name = name
    def __call__(self, step):
        with tf.name_scope(self.name or "WarmUp") as name:
            # Implements polynomial warmup. i.e., if global_step < warmup_steps, the
            # learning rate will be `global_step/num_warmup_steps * init_lr`.
            global_step_float = tf.cast(step, tf.float32)
            warmup_steps_float = tf.cast(self.warmup_steps, tf.float32)
            warmup_percent_done = global_step_float / warmup_steps_float
            warmup_learning_rate = self.initial_learning_rate * tf.math.pow(warmup_percent_done, self.power)
            return tf.cond(
                global_step_float < warmup_steps_float, lambda: warmup_learning_rate, lambda: self.decay_schedule_fn(step - self.warmup_steps), name=name, )
    def get_config(self):
        return {
            "initial_learning_rate": self.initial_learning_rate,
            "decay_schedule_fn": self.decay_schedule_fn,
            "warmup_steps": self.warmup_steps,
            "power": self.power,
            "name": self.name,
        }
def create_optimizer(init_lr: float, num_train_steps: int, num_warmup_steps: int, min_lr_ratio: float = 0.0, adam_beta1: float = 0.9, adam_beta2: float = 0.999, adam_epsilon: float = 1e-8, adam_clipnorm: Optional[float] = None, adam_global_clipnorm: Optional[float] = None, weight_decay_rate: float = 0.0, power: float = 1.0, include_in_weight_decay: Optional[List[str]] = None, ):
    '''simple docstring'''
    lr_schedule = tf.keras.optimizers.schedules.PolynomialDecay(
        initial_learning_rate=init_lr, decay_steps=num_train_steps - num_warmup_steps, end_learning_rate=init_lr * min_lr_ratio, power=power, )
    if num_warmup_steps:
        lr_schedule = WarmUp(
            initial_learning_rate=init_lr, decay_schedule_fn=lr_schedule, warmup_steps=num_warmup_steps, )
    if weight_decay_rate > 0.0:
        optimizer = AdamWeightDecay(
            learning_rate=lr_schedule, weight_decay_rate=weight_decay_rate, beta_1=adam_beta1, beta_2=adam_beta2, epsilon=adam_epsilon, clipnorm=adam_clipnorm, global_clipnorm=adam_global_clipnorm, exclude_from_weight_decay=["LayerNorm", "layer_norm", "bias"], include_in_weight_decay=include_in_weight_decay, )
    else:
        optimizer = tf.keras.optimizers.Adam(
            learning_rate=lr_schedule, beta_1=adam_beta1, beta_2=adam_beta2, epsilon=adam_epsilon, clipnorm=adam_clipnorm, global_clipnorm=adam_global_clipnorm, )
    # We return the optimizer and the LR scheduler in order to better track the
    # evolution of the LR independently of the optimizer.
    return optimizer, lr_schedule
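# Typical call, matching how transformers' TF examples build their optimizer
# (the hyperparameters are illustrative):
optimizer, lr_schedule = create_optimizer(
    init_lr=5e-5,
    num_train_steps=10_000,
    num_warmup_steps=500,
    weight_decay_rate=0.01,
)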
class AdamWeightDecay(Adam):
    def __init__(self, learning_rate: Union[float, tf.keras.optimizers.schedules.LearningRateSchedule] = 0.001, beta_1: float = 0.9, beta_2: float = 0.999, epsilon: float = 1e-7, amsgrad: bool = False, weight_decay_rate: float = 0.0, include_in_weight_decay: Optional[List[str]] = None, exclude_from_weight_decay: Optional[List[str]] = None, name: str = "AdamWeightDecay", **kwargs, ):
        super().__init__(learning_rate, beta_1, beta_2, epsilon, amsgrad, name, **kwargs)
        self.weight_decay_rate = weight_decay_rate
        self._include_in_weight_decay = include_in_weight_decay
        self._exclude_from_weight_decay = exclude_from_weight_decay
    @classmethod
    def from_config(cls, config):
        custom_objects = {"WarmUp": WarmUp}
        return super(AdamWeightDecay, cls).from_config(config, custom_objects=custom_objects)
    def _prepare_local(self, var_device, var_dtype, apply_state):
        super(AdamWeightDecay, self)._prepare_local(var_device, var_dtype, apply_state)
        apply_state[(var_device, var_dtype)]["weight_decay_rate"] = tf.constant(
            self.weight_decay_rate, name="adam_weight_decay_rate")
    def _decay_weights_op(self, var, learning_rate, apply_state):
        do_decay = self._do_use_weight_decay(var.name)
        if do_decay:
            return var.assign_sub(
                learning_rate * var * apply_state[(var.device, var.dtype.base_dtype)]["weight_decay_rate"], use_locking=self._use_locking, )
        return tf.no_op()
    def apply_gradients(self, grads_and_vars, name=None, **kwargs):
        grads, tvars = list(zip(*grads_and_vars))
        return super(AdamWeightDecay, self).apply_gradients(zip(grads, tvars), name=name, **kwargs)
    def _get_lr(self, var_device, var_dtype, apply_state):
        if apply_state is None:
            return self._decayed_lr_t[var_dtype], {}
        apply_state = apply_state or {}
        coefficients = apply_state.get((var_device, var_dtype))
        if coefficients is None:
            coefficients = self._fallback_apply_state(var_device, var_dtype)
            apply_state[(var_device, var_dtype)] = coefficients
        return coefficients["lr_t"], {"apply_state": apply_state}
    def _resource_apply_dense(self, grad, var, apply_state=None):
        lr_t, kwargs = self._get_lr(var.device, var.dtype.base_dtype, apply_state)
        decay = self._decay_weights_op(var, lr_t, apply_state)
        with tf.control_dependencies([decay]):
            return super(AdamWeightDecay, self)._resource_apply_dense(grad, var, **kwargs)
    def _resource_apply_sparse(self, grad, var, indices, apply_state=None):
        lr_t, kwargs = self._get_lr(var.device, var.dtype.base_dtype, apply_state)
        decay = self._decay_weights_op(var, lr_t, apply_state)
        with tf.control_dependencies([decay]):
            return super(AdamWeightDecay, self)._resource_apply_sparse(grad, var, indices, **kwargs)
    def get_config(self):
        config = super().get_config()
        config.update({"weight_decay_rate": self.weight_decay_rate})
        return config
    def _do_use_weight_decay(self, param_name):
        if self.weight_decay_rate == 0:
            return False
        if self._include_in_weight_decay:
            for r in self._include_in_weight_decay:
                if re.search(r, param_name) is not None:
                    return True
        if self._exclude_from_weight_decay:
            for r in self._exclude_from_weight_decay:
                if re.search(r, param_name) is not None:
                    return False
        return True
class GradientAccumulator:
    def __init__(self):
        self._gradients = []
        self._accum_steps = None
    @property
    def step(self):
        if self._accum_steps is None:
            self._accum_steps = tf.Variable(
                tf.constant(0, dtype=tf.int64), trainable=False, synchronization=tf.VariableSynchronization.ON_READ, aggregation=tf.VariableAggregation.ONLY_FIRST_REPLICA, )
        return self._accum_steps.value()
    @property
    def gradients(self):
        if not self._gradients:
            raise ValueError("The accumulator should be called first to initialize the gradients")
        return [gradient.value() if gradient is not None else gradient for gradient in self._gradients]
    def __call__(self, gradients):
        if not self._gradients:
            _ = self.step  # Create the step variable.
            self._gradients.extend(
                [
                    tf.Variable(
                        tf.zeros_like(gradient), trainable=False, synchronization=tf.VariableSynchronization.ON_READ, aggregation=tf.VariableAggregation.ONLY_FIRST_REPLICA, )
                    if gradient is not None
                    else gradient
                    for gradient in gradients
                ] )
        if len(gradients) != len(self._gradients):
            raise ValueError(f"Expected {len(self._gradients)} gradients, but got {len(gradients)}")
        for accum_gradient, gradient in zip(self._gradients, gradients):
            if accum_gradient is not None and gradient is not None:
                accum_gradient.assign_add(gradient)
        self._accum_steps.assign_add(1)
    def reset(self):
        if not self._gradients:
            return
        self._accum_steps.assign(0)
        for gradient in self._gradients:
            if gradient is not None:
                gradient.assign(tf.zeros_like(gradient))
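# Sketch of an accumulate-then-apply training loop around the GradientAccumulator
# above; model, loss_fn, optimizer, dataset and accumulation_steps are all
# placeholders you would supply yourself.
accumulator = GradientAccumulator()
accumulation_steps = 4
for step, batch in enumerate(dataset):
    with tf.GradientTape() as tape:
        loss = loss_fn(model(batch))
    accumulator(tape.gradient(loss, model.trainable_variables))
    if (step + 1) % accumulation_steps == 0:
        optimizer.apply_gradients(zip(accumulator.gradients, model.trainable_variables))
        accumulator.reset()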
| 369
|
from typing import Tuple, Union
from ...modeling_outputs import BackboneOutput
from ...modeling_utils import PreTrainedModel
from ...utils import is_timm_available, is_torch_available, requires_backends
from ...utils.backbone_utils import BackboneMixin
from .configuration_timm_backbone import TimmBackboneConfig
if is_timm_available():
import timm
if is_torch_available():
from torch import Tensor
class __a ( __UpperCamelCase ,__UpperCamelCase ):
__snake_case : Union[str, Any] = """pixel_values"""
__snake_case : Optional[Any] = False
__snake_case : Dict = TimmBackboneConfig
def __init__( self : List[str] , UpperCAmelCase : int , **UpperCAmelCase : List[str] ):
requires_backends(self , """timm""" )
super().__init__(UpperCAmelCase )
lowerCAmelCase_ : List[Any] = config
if config.backbone is None:
raise ValueError("""backbone is not set in the config. Please set it to a timm model name.""" )
if config.backbone not in timm.list_models():
raise ValueError(F'backbone {config.backbone} is not supported by timm.' )
if hasattr(UpperCAmelCase , """out_features""" ) and config.out_features is not None:
raise ValueError("""out_features is not supported by TimmBackbone. Please use out_indices instead.""" )
lowerCAmelCase_ : List[str] = getattr(UpperCAmelCase , """use_pretrained_backbone""" , UpperCAmelCase )
if pretrained is None:
raise ValueError("""use_pretrained_backbone is not set in the config. Please set it to True or False.""" )
# We just take the final layer by default. This matches the default for the transformers models.
lowerCAmelCase_ : str = config.out_indices if getattr(UpperCAmelCase , """out_indices""" , UpperCAmelCase ) is not None else (-1,)
lowerCAmelCase_ : Optional[int] = timm.create_model(
config.backbone , pretrained=UpperCAmelCase , features_only=config.features_only , in_chans=config.num_channels , out_indices=UpperCAmelCase , **UpperCAmelCase , )
# These are used to control the output of the model when called. If output_hidden_states is True, then
# return_layers is modified to include all layers.
        self._return_layers = self._backbone.return_layers
        self._all_layers = {layer["module"]: str(i) for i, layer in enumerate(self._backbone.feature_info.info)}
super()._init_backbone(UpperCAmelCase )
@classmethod
def A ( cls : Dict , UpperCAmelCase : Union[str, Any] , *UpperCAmelCase : List[Any] , **UpperCAmelCase : Dict ):
requires_backends(cls , ["""vision""", """timm"""] )
from ...models.timm_backbone import TimmBackboneConfig
lowerCAmelCase_ : Optional[Any] = kwargs.pop("""config""" , TimmBackboneConfig() )
lowerCAmelCase_ : Union[str, Any] = kwargs.pop("""use_timm_backbone""" , UpperCAmelCase )
if not use_timm:
raise ValueError("""use_timm_backbone must be True for timm backbones""" )
lowerCAmelCase_ : Union[str, Any] = kwargs.pop("""num_channels""" , config.num_channels )
lowerCAmelCase_ : Tuple = kwargs.pop("""features_only""" , config.features_only )
lowerCAmelCase_ : List[str] = kwargs.pop("""use_pretrained_backbone""" , config.use_pretrained_backbone )
lowerCAmelCase_ : Optional[Any] = kwargs.pop("""out_indices""" , config.out_indices )
lowerCAmelCase_ : Optional[Any] = TimmBackboneConfig(
backbone=UpperCAmelCase , num_channels=UpperCAmelCase , features_only=UpperCAmelCase , use_pretrained_backbone=UpperCAmelCase , out_indices=UpperCAmelCase , )
return super()._from_config(UpperCAmelCase , **UpperCAmelCase )
def A ( self : Union[str, Any] , UpperCAmelCase : Union[str, Any] ):
pass
def A ( self : Union[str, Any] , UpperCAmelCase : List[Any] , UpperCAmelCase : Optional[Any]=None , UpperCAmelCase : List[Any]=None , UpperCAmelCase : int=None , **UpperCAmelCase : Any ):
lowerCAmelCase_ : int = return_dict if return_dict is not None else self.config.use_return_dict
lowerCAmelCase_ : Dict = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
lowerCAmelCase_ : Any = output_attentions if output_attentions is not None else self.config.output_attentions
if output_attentions:
raise ValueError("""Cannot output attentions for timm backbones at the moment""" )
if output_hidden_states:
# We modify the return layers to include all the stages of the backbone
lowerCAmelCase_ : Optional[Any] = self._all_layers
lowerCAmelCase_ : List[Any] = self._backbone(UpperCAmelCase , **UpperCAmelCase )
lowerCAmelCase_ : str = self._return_layers
lowerCAmelCase_ : Any = tuple(hidden_states[i] for i in self.out_indices )
else:
lowerCAmelCase_ : Tuple = self._backbone(UpperCAmelCase , **UpperCAmelCase )
lowerCAmelCase_ : Optional[int] = None
lowerCAmelCase_ : List[str] = tuple(UpperCAmelCase )
lowerCAmelCase_ : int = tuple(UpperCAmelCase ) if hidden_states is not None else None
if not return_dict:
lowerCAmelCase_ : Optional[Any] = (feature_maps,)
if output_hidden_states:
lowerCAmelCase_ : Tuple = output + (hidden_states,)
return output
return BackboneOutput(feature_maps=UpperCAmelCase , hidden_states=UpperCAmelCase , attentions=UpperCAmelCase )
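# Sketch of using the wrapper through its config; the backbone name, input size
# and out_indices are illustrative, and pretrained timm weights are downloaded
# by default.
import torch
from transformers import TimmBackbone, TimmBackboneConfig

config = TimmBackboneConfig(backbone="resnet18", out_indices=(1, 2, 3, 4))
backbone = TimmBackbone(config)
outputs = backbone(torch.randn(1, 3, 224, 224))
print([fm.shape for fm in outputs.feature_maps])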
| 28
| 0
|
"""simple docstring"""
import argparse
import fairseq
import torch
from torch import nn
from transformers import (
MBartaaTokenizer,
MBartConfig,
MBartForCausalLM,
SpeechEncoderDecoderConfig,
SpeechEncoderDecoderModel,
WavaVecaConfig,
WavaVecaFeatureExtractor,
WavaVecaModel,
logging,
)
logging.set_verbosity_info()
A: Union[str, Any] = logging.get_logger(__name__)
A: Optional[int] = {
'post_extract_proj': 'feature_projection.projection',
'encoder.pos_conv.0': 'encoder.pos_conv_embed.conv',
'self_attn.k_proj': 'encoder.layers.*.attention.k_proj',
'self_attn.v_proj': 'encoder.layers.*.attention.v_proj',
'self_attn.q_proj': 'encoder.layers.*.attention.q_proj',
'self_attn.out_proj': 'encoder.layers.*.attention.out_proj',
'self_attn_layer_norm': 'encoder.layers.*.layer_norm',
'fc1': 'encoder.layers.*.feed_forward.intermediate_dense',
'fc2': 'encoder.layers.*.feed_forward.output_dense',
'final_layer_norm': 'encoder.layers.*.final_layer_norm',
'encoder.layer_norm': 'encoder.layer_norm',
'w2v_model.layer_norm': 'feature_projection.layer_norm',
'quantizer.weight_proj': 'quantizer.weight_proj',
'quantizer.vars': 'quantizer.codevectors',
'project_q': 'project_q',
'final_proj': 'project_hid',
'w2v_encoder.proj': 'lm_head',
'mask_emb': 'masked_spec_embed',
}
A: List[str] = [
'lm_head',
'quantizer.weight_proj',
'quantizer.codevectors',
'project_q',
'project_hid',
]
def _snake_case ( UpperCamelCase : str , UpperCamelCase : Optional[int] , UpperCamelCase : Optional[int] , UpperCamelCase : Optional[Any] , UpperCamelCase : Union[str, Any] ):
for attribute in key.split(""".""" ):
UpperCAmelCase : int = getattr(lowerCamelCase__ , lowerCamelCase__ )
if weight_type is not None:
UpperCAmelCase : int = getattr(lowerCamelCase__ , lowerCamelCase__ ).shape
else:
UpperCAmelCase : Dict = hf_pointer.shape
assert hf_shape == value.shape, (
F"Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"
F" {value.shape} for {full_name}"
)
if weight_type == "weight":
UpperCAmelCase : List[Any] = value
elif weight_type == "weight_g":
UpperCAmelCase : Union[str, Any] = value
elif weight_type == "weight_v":
UpperCAmelCase : str = value
elif weight_type == "bias":
UpperCAmelCase : Dict = value
else:
UpperCAmelCase : Union[str, Any] = value
logger.info(F"{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}." )
def _snake_case ( UpperCamelCase : List[str] , UpperCamelCase : List[Any] ):
UpperCAmelCase : int = []
UpperCAmelCase : Optional[int] = fairseq_model.state_dict()
UpperCAmelCase : List[Any] = hf_model.feature_extractor
UpperCAmelCase : List[Any] = hf_model.adapter
for name, value in fairseq_dict.items():
UpperCAmelCase : Union[str, Any] = False
if "conv_layers" in name:
load_conv_layer(
lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , hf_model.config.feat_extract_norm == """group""" , )
UpperCAmelCase : List[str] = True
elif any(x in name for x in ["""adaptor""", """w2v_encoder.proj.""", """w2v_proj_ln."""] ):
load_adapter(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
UpperCAmelCase : int = True
else:
for key, mapped_key in MAPPING.items():
if key in name or key.split("""w2v_model.""" )[-1] == name.split(""".""" )[0]:
UpperCAmelCase : Union[str, Any] = True
if "*" in mapped_key:
UpperCAmelCase : Union[str, Any] = name.split(lowerCamelCase__ )[0].split(""".""" )[-2]
UpperCAmelCase : Tuple = mapped_key.replace("""*""" , lowerCamelCase__ )
if "weight_g" in name:
UpperCAmelCase : Optional[int] = "weight_g"
elif "weight_v" in name:
UpperCAmelCase : int = "weight_v"
elif "bias" in name:
UpperCAmelCase : Optional[int] = "bias"
elif "weight" in name:
UpperCAmelCase : List[Any] = "weight"
else:
UpperCAmelCase : Optional[Any] = None
set_recursively(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
continue
if not is_used:
unused_weights.append(lowerCamelCase__ )
logger.warning(F"Unused weights: {unused_weights}" )
def _snake_case ( UpperCamelCase : Optional[int] , UpperCamelCase : Union[str, Any] , UpperCamelCase : Any , UpperCamelCase : Optional[int] , UpperCamelCase : int ):
UpperCAmelCase : Optional[Any] = full_name.split("""conv_layers.""" )[-1]
UpperCAmelCase : Tuple = name.split(""".""" )
UpperCAmelCase : Any = int(items[0] )
UpperCAmelCase : List[Any] = int(items[1] )
if type_id == 0:
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
F"{full_name} has size {value.shape}, but"
F" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."
)
UpperCAmelCase : Optional[int] = value
logger.info(F"Feat extract conv layer {layer_id} was initialized from {full_name}." )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
F"{full_name} has size {value.shape}, but"
F" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."
)
UpperCAmelCase : Union[str, Any] = value
logger.info(F"Feat extract conv layer {layer_id} was initialized from {full_name}." )
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
F"{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was"
" found."
)
UpperCAmelCase : List[Any] = value
logger.info(F"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}." )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
F"{full_name} has size {value.shape}, but"
F" {feature_extractor[layer_id].layer_norm.weight.data.shape} was found."
)
UpperCAmelCase : List[str] = value
logger.info(F"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}." )
else:
unused_weights.append(lowerCamelCase__ )
def _snake_case ( UpperCamelCase : Any , UpperCamelCase : int , UpperCamelCase : Dict , UpperCamelCase : str ):
UpperCAmelCase : Union[str, Any] = full_name.split("""adaptor.""" )[-1]
UpperCAmelCase : List[Any] = name.split(""".""" )
if items[1].isdigit():
UpperCAmelCase : int = int(items[1] )
else:
UpperCAmelCase : Any = None
if "adaptor" not in full_name:
if "proj_ln" in full_name:
# has to be layer norm
if "bias" in name:
assert (
value.shape == adapter.proj_layer_norm.bias.data.shape
), F"{full_name} has size {value.shape}, but {adapter.proj_layer_norm.bias.data.shape} was found."
UpperCAmelCase : Optional[int] = value
logger.info(F"Adapter proj layer norm bias was initialized from {full_name}." )
if "weight" in name:
assert (
value.shape == adapter.proj_layer_norm.weight.data.shape
), F"{full_name} has size {value.shape}, but {adapter.proj_layer_norm.weight.data.shape} was found."
UpperCAmelCase : Optional[int] = value
else:
# has to be projection layer
if "bias" in name:
assert (
value.shape == adapter.proj.bias.data.shape
), F"{full_name} has size {value.shape}, but {adapter.proj.bias.data.shape} was found."
UpperCAmelCase : int = value
logger.info(F"Adapter proj layer bias was initialized from {full_name}." )
if "weight" in name:
assert (
value.shape == adapter.proj.weight.data.shape
), F"{full_name} has size {value.shape}, but {adapter.proj.weight.data.shape} was found."
UpperCAmelCase : Any = value
logger.info(F"Adapter proj layer weight was initialized from {full_name}." )
elif isinstance(lowerCamelCase__ , lowerCamelCase__ ):
if "bias" in name:
assert (
value.shape == adapter.layers[layer_id].conv.bias.data.shape
), F"{full_name} has size {value.shape}, but {adapter.layers[layer_id].conv.bias.data.shape} was found."
UpperCAmelCase : Tuple = value
logger.info(F"Adapter layer {layer_id} bias was initialized from {full_name}." )
elif "weight" in name:
assert (
value.shape == adapter.layers[layer_id].conv.weight.data.shape
), F"{full_name} has size {value.shape}, but {adapter.layers[layer_id].conv.weight.data.shape} was found."
UpperCAmelCase : Any = value
logger.info(F"Adapter layer {layer_id} bias was initialized from {full_name}." )
else:
unused_weights.append(lowerCamelCase__ )
def _snake_case ( UpperCamelCase : Optional[int] ):
UpperCAmelCase : Union[str, Any] = emb.weight.shape
UpperCAmelCase : str = nn.Linear(lowerCamelCase__ , lowerCamelCase__ , bias=lowerCamelCase__ )
UpperCAmelCase : Optional[Any] = emb.weight.data
return lin_layer
@torch.no_grad()
def convert_wavaveca_checkpoint( checkpoint_path , pytorch_dump_folder_path , dict_path , config_yaml_path , encoder_config_path , decoder_config_path , add_adapter , adapter_kernel_size , adapter_stride , decoder_start_token_id , encoder_output_dim , ):
    """
    Copy/paste/tweak the fairseq checkpoint's weights into the transformers design.
    """
    # load configs
    encoder_config = WavaVecaConfig.from_pretrained(
        encoder_config_path , add_adapter=add_adapter , adapter_stride=adapter_stride , adapter_kernel_size=adapter_kernel_size , use_auth_token=True , output_hidden_size=encoder_output_dim , )
    decoder_config = MBartConfig.from_pretrained(decoder_config_path )
    # load model
    model , _ , _ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
        [checkpoint_path] , arg_overrides={
            """config_yaml""": config_yaml_path,
            """data""": """/""".join(dict_path.split("""/""" )[:-1] ),
            """w2v_path""": checkpoint_path,
            """load_pretrained_decoder_from""": None,
        } , )
    model = model[0].eval()
    # load feature extractor
    feature_extractor = WavaVecaFeatureExtractor.from_pretrained(encoder_config_path , use_auth_token=True )
    # set weights for wav2vec2 encoder
    hf_encoder = WavaVecaModel(encoder_config )
    recursively_load_weights_wavaveca(model.encoder , hf_encoder )
    # load decoder weights
    hf_decoder = MBartForCausalLM(decoder_config )
    missing_keys , unexpected_keys = hf_decoder.model.decoder.load_state_dict(model.decoder.state_dict() , strict=False )
    logger.warning(F"The following keys are missing when loading the decoder weights: {missing_keys}" )
    logger.warning(F"The following keys are unexpected when loading the decoder weights: {unexpected_keys}" )
    hf_wavavec = SpeechEncoderDecoderModel(encoder=hf_encoder , decoder=hf_decoder )
    hf_wavavec.config.tie_word_embeddings = False
    tokenizer = MBartaaTokenizer(dict_path )
    tokenizer.save_pretrained(pytorch_dump_folder_path )
    config = hf_wavavec.config.to_dict()
    config["pad_token_id"] = tokenizer.pad_token_id
    config["bos_token_id"] = tokenizer.bos_token_id
    config["eos_token_id"] = tokenizer.eos_token_id
    config["tokenizer_class"] = "mbart50"
    config["feature_extractor_type"] = "wav2vec2"
    config["decoder_start_token_id"] = tokenizer.eos_token_id
    config["forced_bos_token_id"] = 250004
    config["forced_eos_token_id"] = tokenizer.eos_token_id
    hf_wavavec.config = SpeechEncoderDecoderConfig.from_dict(config )
    hf_wavavec.save_pretrained(pytorch_dump_folder_path )
    feature_extractor.save_pretrained(pytorch_dump_folder_path )
if __name__ == "__main__":
A: Dict = argparse.ArgumentParser()
parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to fairseq checkpoint")
parser.add_argument("--dict_path", default=None, type=str, help="Path to dict of fine-tuned model")
parser.add_argument("--config_yaml_path", default=None, type=str, help="Path to yaml file of fine-tuned model")
parser.add_argument(
"--encoder_config_path",
default="facebook/wav2vec2-xls-r-1b",
type=str,
help="Path to hf encoder wav2vec2 checkpoint config",
)
parser.add_argument(
"--decoder_config_path",
default="facebook/mbart-large-50-one-to-many-mmt",
type=str,
help="Path to hf decoder checkpoint config",
)
parser.add_argument("--add_adapter", default=True, type=bool, help="whethere to add model adapter layers")
parser.add_argument("--adapter_stride", default=2, type=int, help="stride of adapter layers")
parser.add_argument("--adapter_kernel_size", default=3, type=int, help="kernel size of adapter layers")
parser.add_argument("--encoder_output_dim", default=1_0_2_4, type=int, help="encoder output dim")
parser.add_argument("--start_token_id", default=2_5_0_0_0_4, type=int, help="`decoder_start_token_id` of model config")
A: Tuple = parser.parse_args()
convert_wavaveca_checkpoint(
args.checkpoint_path,
args.pytorch_dump_folder_path,
args.dict_path,
args.config_yaml_path,
encoder_config_path=args.encoder_config_path,
decoder_config_path=args.decoder_config_path,
add_adapter=args.add_adapter,
adapter_kernel_size=args.adapter_kernel_size,
adapter_stride=args.adapter_stride,
decoder_start_token_id=args.start_token_id,
encoder_output_dim=args.encoder_output_dim,
)
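    # Example invocation (all paths below are hypothetical placeholders, not shipped files;
    # the remaining flags keep the defaults defined above: XLS-R 1B encoder, mBART-50
    # decoder, adapter layers enabled):
    #
    #   python convert_checkpoint.py \
    #       --checkpoint_path ./checkpoint_best.pt \
    #       --dict_path ./mbart50/dict.txt \
    #       --config_yaml_path ./config.yaml \
    #       --pytorch_dump_folder_path ./wav2vec2-xls-r-mbart50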
| 109
|
"""simple docstring"""
from typing import Optional, Tuple, Union
import flax
import flax.linen as nn
import jax
import jax.numpy as jnp
from flax.core.frozen_dict import FrozenDict
from ..configuration_utils import ConfigMixin, flax_register_to_config
from ..utils import BaseOutput
from .embeddings_flax import FlaxTimestepEmbedding, FlaxTimesteps
from .modeling_flax_utils import FlaxModelMixin
from .unet_2d_blocks_flax import (
    FlaxCrossAttnDownBlock2D,
    FlaxDownBlock2D,
    FlaxUNetMidBlock2DCrossAttn,
)
@flax.struct.dataclass
class FlaxControlNetOutput ( BaseOutput ):
    down_block_res_samples: jnp.ndarray
    mid_block_res_sample: jnp.ndarray
class FlaxControlNetConditioningEmbedding ( nn.Module ):
    conditioning_embedding_channels: int
    block_out_channels: Tuple[int] = (16, 32, 96, 256)
    dtype: jnp.dtype = jnp.float32
def UpperCAmelCase__ ( self : List[Any] ):
lowerCamelCase_ : Optional[int] =nn.Conv(
self.block_out_channels[0] , kernel_size=(3, 3) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
lowerCamelCase_ : Any =[]
for i in range(len(self.block_out_channels ) - 1 ):
lowerCamelCase_ : Union[str, Any] =self.block_out_channels[i]
lowerCamelCase_ : Any =self.block_out_channels[i + 1]
lowerCamelCase_ : List[str] =nn.Conv(
snake_case__ , kernel_size=(3, 3) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
blocks.append(snake_case__ )
lowerCamelCase_ : List[str] =nn.Conv(
snake_case__ , kernel_size=(3, 3) , strides=(2, 2) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
blocks.append(snake_case__ )
lowerCamelCase_ : Union[str, Any] =blocks
lowerCamelCase_ : Optional[Any] =nn.Conv(
self.conditioning_embedding_channels , kernel_size=(3, 3) , padding=((1, 1), (1, 1)) , kernel_init=nn.initializers.zeros_init() , bias_init=nn.initializers.zeros_init() , dtype=self.dtype , )
def __call__( self : Tuple , snake_case__ : Union[str, Any] ):
lowerCamelCase_ : int =self.conv_in(snake_case__ )
lowerCamelCase_ : List[Any] =nn.silu(snake_case__ )
for block in self.blocks:
lowerCamelCase_ : Union[str, Any] =block(snake_case__ )
lowerCamelCase_ : List[str] =nn.silu(snake_case__ )
lowerCamelCase_ : Tuple =self.conv_out(snake_case__ )
return embedding
@flax_register_to_config
class FlaxControlNetModel ( nn.Module , FlaxModelMixin , ConfigMixin ):
    sample_size: int = 32
    in_channels: int = 4
    down_block_types: Tuple[str] = (
        "CrossAttnDownBlock2D",
        "CrossAttnDownBlock2D",
        "CrossAttnDownBlock2D",
        "DownBlock2D",
    )
    only_cross_attention: Union[bool, Tuple[bool]] = False
    block_out_channels: Tuple[int] = (320, 640, 1280, 1280)
    layers_per_block: int = 2
    attention_head_dim: Union[int, Tuple[int]] = 8
    num_attention_heads: Optional[Union[int, Tuple[int]]] = None
    cross_attention_dim: int = 1280
    dropout: float = 0.0
    use_linear_projection: bool = False
    dtype: jnp.dtype = jnp.float32
    flip_sin_to_cos: bool = True
    freq_shift: int = 0
    controlnet_conditioning_channel_order: str = "rgb"
    conditioning_embedding_out_channels: Tuple[int] = (16, 32, 96, 256)
def UpperCAmelCase__ ( self : int , snake_case__ : jax.random.KeyArray ):
# init input tensors
lowerCamelCase_ : str =(1, self.in_channels, self.sample_size, self.sample_size)
        lowerCamelCase_ : List[Any] =jnp.zeros(snake_case__ , dtype=jnp.float32 )
        lowerCamelCase_ : int =jnp.ones((1,) , dtype=jnp.int32 )
        lowerCamelCase_ : Union[str, Any] =jnp.zeros((1, 1, self.cross_attention_dim) , dtype=jnp.float32 )
        lowerCamelCase_ : Optional[int] =(1, 3, self.sample_size * 8, self.sample_size * 8)
        lowerCamelCase_ : Any =jnp.zeros(snake_case__ , dtype=jnp.float32 )
lowerCamelCase_ , lowerCamelCase_ : Any =jax.random.split(snake_case__ )
lowerCamelCase_ : Tuple ={"params": params_rng, "dropout": dropout_rng}
return self.init(snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ )["params"]
def UpperCAmelCase__ ( self : List[Any] ):
lowerCamelCase_ : Union[str, Any] =self.block_out_channels
lowerCamelCase_ : Optional[int] =block_out_channels[0] * 4
# If `num_attention_heads` is not defined (which is the case for most models)
# it will default to `attention_head_dim`. This looks weird upon first reading it and it is.
# The reason for this behavior is to correct for incorrectly named variables that were introduced
# when this library was created. The incorrect naming was only discovered much later in https://github.com/huggingface/diffusers/issues/2011#issuecomment-1547958131
# Changing `attention_head_dim` to `num_attention_heads` for 40,000+ configurations is too backwards breaking
# which is why we correct for the naming here.
lowerCamelCase_ : int =self.num_attention_heads or self.attention_head_dim
# input
lowerCamelCase_ : Any =nn.Conv(
block_out_channels[0] , kernel_size=(3, 3) , strides=(1, 1) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
# time
lowerCamelCase_ : Union[str, Any] =FlaxTimesteps(
block_out_channels[0] , flip_sin_to_cos=self.flip_sin_to_cos , freq_shift=self.config.freq_shift )
lowerCamelCase_ : List[Any] =FlaxTimestepEmbedding(snake_case__ , dtype=self.dtype )
lowerCamelCase_ : List[str] =FlaxControlNetConditioningEmbedding(
conditioning_embedding_channels=block_out_channels[0] , block_out_channels=self.conditioning_embedding_out_channels , )
lowerCamelCase_ : Optional[int] =self.only_cross_attention
if isinstance(snake_case__ , snake_case__ ):
lowerCamelCase_ : str =(only_cross_attention,) * len(self.down_block_types )
if isinstance(snake_case__ , snake_case__ ):
lowerCamelCase_ : Any =(num_attention_heads,) * len(self.down_block_types )
# down
lowerCamelCase_ : Optional[int] =[]
lowerCamelCase_ : Optional[Any] =[]
lowerCamelCase_ : str =block_out_channels[0]
lowerCamelCase_ : str =nn.Conv(
snake_case__ , kernel_size=(1, 1) , padding="VALID" , kernel_init=nn.initializers.zeros_init() , bias_init=nn.initializers.zeros_init() , dtype=self.dtype , )
controlnet_down_blocks.append(snake_case__ )
for i, down_block_type in enumerate(self.down_block_types ):
lowerCamelCase_ : Union[str, Any] =output_channel
lowerCamelCase_ : Tuple =block_out_channels[i]
lowerCamelCase_ : List[Any] =i == len(snake_case__ ) - 1
if down_block_type == "CrossAttnDownBlock2D":
                lowerCamelCase_ : Tuple =FlaxCrossAttnDownBlock2D(
in_channels=snake_case__ , out_channels=snake_case__ , dropout=self.dropout , num_layers=self.layers_per_block , num_attention_heads=num_attention_heads[i] , add_downsample=not is_final_block , use_linear_projection=self.use_linear_projection , only_cross_attention=only_cross_attention[i] , dtype=self.dtype , )
else:
                lowerCamelCase_ : Union[str, Any] =FlaxDownBlock2D(
in_channels=snake_case__ , out_channels=snake_case__ , dropout=self.dropout , num_layers=self.layers_per_block , add_downsample=not is_final_block , dtype=self.dtype , )
down_blocks.append(snake_case__ )
for _ in range(self.layers_per_block ):
lowerCamelCase_ : List[Any] =nn.Conv(
snake_case__ , kernel_size=(1, 1) , padding="VALID" , kernel_init=nn.initializers.zeros_init() , bias_init=nn.initializers.zeros_init() , dtype=self.dtype , )
controlnet_down_blocks.append(snake_case__ )
if not is_final_block:
lowerCamelCase_ : Any =nn.Conv(
snake_case__ , kernel_size=(1, 1) , padding="VALID" , kernel_init=nn.initializers.zeros_init() , bias_init=nn.initializers.zeros_init() , dtype=self.dtype , )
controlnet_down_blocks.append(snake_case__ )
lowerCamelCase_ : List[str] =down_blocks
lowerCamelCase_ : int =controlnet_down_blocks
# mid
lowerCamelCase_ : int =block_out_channels[-1]
        lowerCamelCase_ : str =FlaxUNetMidBlock2DCrossAttn(
in_channels=snake_case__ , dropout=self.dropout , num_attention_heads=num_attention_heads[-1] , use_linear_projection=self.use_linear_projection , dtype=self.dtype , )
lowerCamelCase_ : List[str] =nn.Conv(
snake_case__ , kernel_size=(1, 1) , padding="VALID" , kernel_init=nn.initializers.zeros_init() , bias_init=nn.initializers.zeros_init() , dtype=self.dtype , )
def __call__( self : Tuple , snake_case__ : Dict , snake_case__ : str , snake_case__ : Union[str, Any] , snake_case__ : int , snake_case__ : float = 1.0 , snake_case__ : bool = True , snake_case__ : bool = False , ):
lowerCamelCase_ : int =self.controlnet_conditioning_channel_order
if channel_order == "bgr":
lowerCamelCase_ : Optional[Any] =jnp.flip(snake_case__ , axis=1 )
# 1. time
if not isinstance(snake_case__ , jnp.ndarray ):
            lowerCamelCase_ : Dict =jnp.array([timesteps] , dtype=jnp.int32 )
elif isinstance(snake_case__ , jnp.ndarray ) and len(timesteps.shape ) == 0:
            lowerCamelCase_ : Any =timesteps.astype(dtype=jnp.float32 )
lowerCamelCase_ : Optional[Any] =jnp.expand_dims(snake_case__ , 0 )
lowerCamelCase_ : Any =self.time_proj(snake_case__ )
lowerCamelCase_ : Union[str, Any] =self.time_embedding(snake_case__ )
# 2. pre-process
lowerCamelCase_ : List[str] =jnp.transpose(snake_case__ , (0, 2, 3, 1) )
lowerCamelCase_ : Union[str, Any] =self.conv_in(snake_case__ )
lowerCamelCase_ : List[str] =jnp.transpose(snake_case__ , (0, 2, 3, 1) )
lowerCamelCase_ : str =self.controlnet_cond_embedding(snake_case__ )
sample += controlnet_cond
# 3. down
lowerCamelCase_ : List[str] =(sample,)
for down_block in self.down_blocks:
if isinstance(snake_case__ , snake_case__ ):
lowerCamelCase_ , lowerCamelCase_ : Union[str, Any] =down_block(snake_case__ , snake_case__ , snake_case__ , deterministic=not train )
else:
lowerCamelCase_ , lowerCamelCase_ : Optional[int] =down_block(snake_case__ , snake_case__ , deterministic=not train )
down_block_res_samples += res_samples
# 4. mid
lowerCamelCase_ : Optional[int] =self.mid_block(snake_case__ , snake_case__ , snake_case__ , deterministic=not train )
        # 5. controlnet blocks
lowerCamelCase_ : Dict =()
for down_block_res_sample, controlnet_block in zip(snake_case__ , self.controlnet_down_blocks ):
lowerCamelCase_ : Dict =controlnet_block(snake_case__ )
controlnet_down_block_res_samples += (down_block_res_sample,)
lowerCamelCase_ : List[Any] =controlnet_down_block_res_samples
lowerCamelCase_ : Tuple =self.controlnet_mid_block(snake_case__ )
# 6. scaling
lowerCamelCase_ : Dict =[sample * conditioning_scale for sample in down_block_res_samples]
mid_block_res_sample *= conditioning_scale
if not return_dict:
return (down_block_res_samples, mid_block_res_sample)
return FlaxControlNetOutput(
down_block_res_samples=snake_case__ , mid_block_res_sample=snake_case__ )
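# Usage sketch. In the released diffusers library this module is FlaxControlNetModel;
# loading a pretrained checkpoint returns the model together with its params (the
# checkpoint name below is illustrative, and the exact call is an assumption about
# the public API rather than something shown in this file):
#
#   controlnet, controlnet_params = FlaxControlNetModel.from_pretrained(
#       "lllyasviel/sd-controlnet-canny", from_pt=True, dtype=jnp.float32
#   )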
| 144
| 0
|
"""simple docstring"""
import numpy as np
import torch
from torch.nn import CrossEntropyLoss
from transformers import AutoModelForCausalLM, AutoTokenizer
import datasets
from datasets import logging
__SCREAMING_SNAKE_CASE : str = '\\n\n'
__SCREAMING_SNAKE_CASE : Dict = '\nPerplexity (PPL) is one of the most common metrics for evaluating language models.\nIt is defined as the exponentiated average negative log-likelihood of a sequence.\n\nFor more information, see https://huggingface.co/docs/transformers/perplexity\n'
__SCREAMING_SNAKE_CASE : Tuple = '\nArgs:\n model_id (str): model used for calculating Perplexity\n NOTE: Perplexity can only be calculated for causal language models.\n This includes models such as gpt2, causal variations of bert,\n causal versions of t5, and more (the full list can be found\n in the AutoModelForCausalLM documentation here:\n https://huggingface.co/docs/transformers/master/en/model_doc/auto#transformers.AutoModelForCausalLM )\n\n input_texts (list of str): input text, each separate text snippet\n is one list entry.\n batch_size (int): the batch size to run texts through the model. Defaults to 16.\n add_start_token (bool): whether to add the start token to the texts,\n so the perplexity can include the probability of the first word. Defaults to True.\n device (str): device to run on, defaults to \'cuda\' when available\nReturns:\n perplexity: dictionary containing the perplexity scores for the texts\n in the input list, as well as the mean perplexity. If one of the input texts is\n longer than the max input length of the model, then it is truncated to the\n max length for the perplexity computation.\nExamples:\n Example 1:\n >>> perplexity = datasets.load_metric("perplexity")\n >>> input_texts = ["lorem ipsum", "Happy Birthday!", "Bienvenue"]\n >>> results = perplexity.compute(model_id=\'gpt2\',\n ... add_start_token=False,\n ... input_texts=input_texts) # doctest:+ELLIPSIS\n >>> print(list(results.keys()))\n [\'perplexities\', \'mean_perplexity\']\n >>> print(round(results["mean_perplexity"], 2))\n 78.22\n >>> print(round(results["perplexities"][0], 2))\n 11.11\n\n Example 2:\n >>> perplexity = datasets.load_metric("perplexity")\n >>> input_texts = datasets.load_dataset("wikitext",\n ... "wikitext-2-raw-v1",\n ... split="test")["text"][:50] # doctest:+ELLIPSIS\n [...]\n >>> input_texts = [s for s in input_texts if s!=\'\']\n >>> results = perplexity.compute(model_id=\'gpt2\',\n ... input_texts=input_texts) # doctest:+ELLIPSIS\n >>> print(list(results.keys()))\n [\'perplexities\', \'mean_perplexity\']\n >>> print(round(results["mean_perplexity"], 2))\n 60.35\n >>> print(round(results["perplexities"][0], 2))\n 81.12\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION)
class __A (datasets.Metric):
'''simple docstring'''
    def _info( self ) ->Dict:
"""simple docstring"""
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"""input_texts""": datasets.Value("""string""" ),
} ) , reference_urls=["""https://huggingface.co/docs/transformers/perplexity"""] , )
    def _compute( self , input_texts , model_id , batch_size: int = 16 , add_start_token: bool = True , device=None ):
        """simple docstring"""
        if device is not None:
            assert device in ["gpu", "cpu", "cuda"], "device should be either gpu, cpu or cuda."
            if device == "gpu":
                device = "cuda"
        else:
            device = "cuda" if torch.cuda.is_available() else "cpu"
        model = AutoModelForCausalLM.from_pretrained(model_id )
        model = model.to(device )
        tokenizer = AutoTokenizer.from_pretrained(model_id )
        # if batch_size > 1 (which generally leads to padding being required), and
        # if there is not an already assigned pad_token, assign an existing
        # special token to also be the padding token
        if tokenizer.pad_token is None and batch_size > 1:
            existing_special_tokens = list(tokenizer.special_tokens_map_extended.values() )
            # check that the model already has at least one special token defined
            assert (
                len(existing_special_tokens ) > 0
            ), "If batch_size > 1, model must have at least one special token to use for padding. Please use a different model or set batch_size=1."
            # assign one of the special tokens to also be the pad token
            tokenizer.add_special_tokens({"pad_token": existing_special_tokens[0]} )
        if add_start_token:
            # leave room for <BOS> token to be added:
            assert (
                tokenizer.bos_token is not None
            ), "Input model must already have a BOS token if using add_start_token=True. Please use a different model, or set add_start_token=False"
            max_tokenized_len = model.config.max_length - 1
        else:
            max_tokenized_len = model.config.max_length
        encodings = tokenizer(
            input_texts , add_special_tokens=False , padding=True , truncation=True , max_length=max_tokenized_len , return_tensors="pt" , return_attention_mask=True , ).to(device )
        encoded_texts = encodings["input_ids"]
        attn_masks = encodings["attention_mask"]
        # check that each input is long enough:
        if add_start_token:
            assert torch.all(torch.ge(attn_masks.sum(1 ) , 1 ) ), "Each input text must be at least one token long."
        else:
            assert torch.all(
                torch.ge(attn_masks.sum(1 ) , 2 ) ), "When add_start_token=False, each input text must be at least two tokens long. Run with add_start_token=True if inputting strings of only one token, and remove all empty input strings."
        ppls = []
        loss_fct = CrossEntropyLoss(reduction="none" )
        for start_index in logging.tqdm(range(0 , len(encoded_texts ) , batch_size ) ):
            end_index = min(start_index + batch_size , len(encoded_texts ) )
            encoded_batch = encoded_texts[start_index:end_index]
            attn_mask = attn_masks[start_index:end_index]
            if add_start_token:
                bos_tokens_tensor = torch.tensor([[tokenizer.bos_token_id]] * encoded_batch.size(dim=0 ) ).to(device )
                encoded_batch = torch.cat([bos_tokens_tensor, encoded_batch] , dim=1 )
                attn_mask = torch.cat(
                    [torch.ones(bos_tokens_tensor.size() , dtype=torch.int64 ).to(device ), attn_mask] , dim=1 )
            labels = encoded_batch
            with torch.no_grad():
                out_logits = model(encoded_batch , attention_mask=attn_mask ).logits
            shift_logits = out_logits[..., :-1, :].contiguous()
            shift_labels = labels[..., 1:].contiguous()
            shift_attention_mask_batch = attn_mask[..., 1:].contiguous()
            perplexity_batch = torch.exp2(
                (loss_fct(shift_logits.transpose(1 , 2 ) , shift_labels ) * shift_attention_mask_batch).sum(1 )
                / shift_attention_mask_batch.sum(1 ) )
            ppls += perplexity_batch.tolist()
        return {"perplexities": ppls, "mean_perplexity": np.mean(ppls )}
| 371
|
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
__SCREAMING_SNAKE_CASE : Dict = logging.get_logger(__name__)
__SCREAMING_SNAKE_CASE : Tuple = {
'google/mobilenet_v1_1.0_224': 'https://huggingface.co/google/mobilenet_v1_1.0_224/resolve/main/config.json',
'google/mobilenet_v1_0.75_192': 'https://huggingface.co/google/mobilenet_v1_0.75_192/resolve/main/config.json',
# See all MobileNetV1 models at https://huggingface.co/models?filter=mobilenet_v1
}
class MobileNetVaConfig (PretrainedConfig):
    '''simple docstring'''
    model_type = """mobilenet_v1"""
    def __init__( self , num_channels=3 , image_size=224 , depth_multiplier=1.0 , min_depth=8 , hidden_act="relu6" , tf_padding=True , classifier_dropout_prob=0.999 , initializer_range=0.02 , layer_norm_eps=0.001 , **kwargs , ) ->None:
        """simple docstring"""
        super().__init__(**kwargs )
        if depth_multiplier <= 0:
            raise ValueError("""depth_multiplier must be greater than zero.""" )
        self.num_channels = num_channels
        self.image_size = image_size
        self.depth_multiplier = depth_multiplier
        self.min_depth = min_depth
        self.hidden_act = hidden_act
        self.tf_padding = tf_padding
        self.classifier_dropout_prob = classifier_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
class MobileNetVaOnnxConfig (OnnxConfig):
    '''simple docstring'''
    torch_onnx_minimum_version = version.parse("""1.11""")
@property
def lowerCAmelCase ( self : Union[str, Any] ) ->Mapping[str, Mapping[int, str]]:
"""simple docstring"""
return OrderedDict([("""pixel_values""", {0: """batch"""})] )
@property
def lowerCAmelCase ( self : int ) ->Mapping[str, Mapping[int, str]]:
"""simple docstring"""
if self.task == "image-classification":
return OrderedDict([("""logits""", {0: """batch"""})] )
else:
return OrderedDict([("""last_hidden_state""", {0: """batch"""}), ("""pooler_output""", {0: """batch"""})] )
@property
def lowerCAmelCase ( self : int ) ->float:
"""simple docstring"""
return 1E-4
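# Quick sanity check of the config above (it mirrors transformers.MobileNetV1Config,
# here under its local name):
#
#   config = MobileNetVaConfig(depth_multiplier=0.75, image_size=192)
#   assert config.depth_multiplier == 0.75 and config.image_size == 192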
| 233
| 0
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
a =logging.get_logger(__name__)
a ={
"""facebook/dpr-ctx_encoder-single-nq-base""": (
"""https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/config.json"""
),
"""facebook/dpr-question_encoder-single-nq-base""": (
"""https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/config.json"""
),
"""facebook/dpr-reader-single-nq-base""": (
"""https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/config.json"""
),
"""facebook/dpr-ctx_encoder-multiset-base""": (
"""https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/config.json"""
),
"""facebook/dpr-question_encoder-multiset-base""": (
"""https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/config.json"""
),
"""facebook/dpr-reader-multiset-base""": (
"""https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/config.json"""
),
}
class A_ ( PretrainedConfig ):
    model_type : str = '''dpr'''
    def __init__( self ,vocab_size=3_0_5_2_2 ,hidden_size=7_6_8 ,num_hidden_layers=1_2 ,num_attention_heads=1_2 ,intermediate_size=3_0_7_2 ,hidden_act="gelu" ,hidden_dropout_prob=0.1 ,attention_probs_dropout_prob=0.1 ,max_position_embeddings=5_1_2 ,type_vocab_size=2 ,initializer_range=0.02 ,layer_norm_eps=1E-12 ,pad_token_id=0 ,position_embedding_type="absolute" ,projection_dim: int = 0 ,**kwargs ,):
        super().__init__(pad_token_id=pad_token_id ,**kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.projection_dim = projection_dim
        self.position_embedding_type = position_embedding_type
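# Quick sanity check of the config above (it mirrors transformers.DPRConfig with
# BERT-base defaults):
#
#   config = A_()
#   assert config.hidden_size == 768 and config.projection_dim == 0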
| 73
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
'''configuration_jukebox''': [
'''JUKEBOX_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''JukeboxConfig''',
'''JukeboxPriorConfig''',
'''JukeboxVQVAEConfig''',
],
'''tokenization_jukebox''': ['''JukeboxTokenizer'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_jukebox"] = [
'''JUKEBOX_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''JukeboxModel''',
'''JukeboxPreTrainedModel''',
'''JukeboxVQVAE''',
'''JukeboxPrior''',
]
if TYPE_CHECKING:
from .configuration_jukebox import (
JUKEBOX_PRETRAINED_CONFIG_ARCHIVE_MAP,
JukeboxConfig,
JukeboxPriorConfig,
JukeboxVQVAEConfig,
)
from .tokenization_jukebox import JukeboxTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_jukebox import (
JUKEBOX_PRETRAINED_MODEL_ARCHIVE_LIST,
JukeboxModel,
JukeboxPreTrainedModel,
JukeboxPrior,
JukeboxVQVAE,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
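# Note: with the _LazyModule pattern above, `import transformers.models.jukebox` stays
# cheap; heavy submodules (e.g. modeling_jukebox, which pulls in torch) are only
# imported the first time one of their attributes is accessed. A sketch:
#
#   from transformers.models import jukebox
#   config = jukebox.JukeboxConfig()   # this access triggers the real import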
| 79
| 0
|
from sklearn.metrics import recall_score
import datasets
SCREAMING_SNAKE_CASE_ = '\nRecall is the fraction of the positive examples that were correctly labeled by the model as positive. It can be computed with the equation:\nRecall = TP / (TP + FN)\nWhere TP is the true positives and FN is the false negatives.\n'
SCREAMING_SNAKE_CASE_ = '\nArgs:\n- **predictions** (`list` of `int`): The predicted labels.\n- **references** (`list` of `int`): The ground truth labels.\n- **labels** (`list` of `int`): The set of labels to include when `average` is not set to `binary`, and their order when average is `None`. Labels present in the data can be excluded in this input, for example to calculate a multiclass average ignoring a majority negative class, while labels not present in the data will result in 0 components in a macro average. For multilabel targets, labels are column indices. By default, all labels in y_true and y_pred are used in sorted order. Defaults to None.\n- **pos_label** (`int`): The class label to use as the \'positive class\' when calculating the recall. Defaults to `1`.\n- **average** (`string`): This parameter is required for multiclass/multilabel targets. If None, the scores for each class are returned. Otherwise, this determines the type of averaging performed on the data. Defaults to `\'binary\'`.\n    - `\'binary\'`: Only report results for the class specified by `pos_label`. This is applicable only if the target labels and predictions are binary.\n    - `\'micro\'`: Calculate metrics globally by counting the total true positives, false negatives, and false positives.\n    - `\'macro\'`: Calculate metrics for each label, and find their unweighted mean. This does not take label imbalance into account.\n    - `\'weighted\'`: Calculate metrics for each label, and find their average weighted by support (the number of true instances for each label). This alters `\'macro\'` to account for label imbalance. Note that it can result in an F-score that is not between precision and recall.\n    - `\'samples\'`: Calculate metrics for each instance, and find their average (only meaningful for multilabel classification).\n- **sample_weight** (`list` of `float`): Sample weights Defaults to `None`.\n- **zero_division** (): Sets the value to return when there is a zero division. Defaults to .\n    - `\'warn\'`: If there is a zero division, the return value is `0`, but warnings are also raised.\n    - `0`: If there is a zero division, the return value is `0`.\n    - `1`: If there is a zero division, the return value is `1`.\n\nReturns:\n- **recall** (`float`, or `array` of `float`): Either the general recall score, or the recall scores for individual classes, depending on the values input to `labels` and `average`. Minimum possible value is 0. Maximum possible value is 1. A higher recall means that more of the positive examples have been labeled correctly. Therefore, a higher recall is generally considered better.\n\nExamples:\n\n    Example 1-A simple example with some errors\n        >>> recall_metric = datasets.load_metric(\'recall\')\n        >>> results = recall_metric.compute(references=[0, 0, 1, 1, 1], predictions=[0, 1, 0, 1, 1])\n        >>> print(results)\n        {\'recall\': 0.6666666666666666}\n\n    Example 2-The same example as Example 1, but with `pos_label=0` instead of the default `pos_label=1`.\n        >>> recall_metric = datasets.load_metric(\'recall\')\n        >>> results = recall_metric.compute(references=[0, 0, 1, 1, 1], predictions=[0, 1, 0, 1, 1], pos_label=0)\n        >>> print(results)\n        {\'recall\': 0.5}\n\n    Example 3-The same example as Example 1, but with `sample_weight` included.\n        >>> recall_metric = datasets.load_metric(\'recall\')\n        >>> sample_weight = [0.9, 0.2, 0.9, 0.3, 0.8]\n        >>> results = recall_metric.compute(references=[0, 0, 1, 1, 1], predictions=[0, 1, 0, 1, 1], sample_weight=sample_weight)\n        >>> print(results)\n        {\'recall\': 0.55}\n\n    Example 4-A multiclass example, using different averages.\n        >>> recall_metric = datasets.load_metric(\'recall\')\n        >>> predictions = [0, 2, 1, 0, 0, 1]\n        >>> references = [0, 1, 2, 0, 1, 2]\n        >>> results = recall_metric.compute(predictions=predictions, references=references, average=\'macro\')\n        >>> print(results)\n        {\'recall\': 0.3333333333333333}\n        >>> results = recall_metric.compute(predictions=predictions, references=references, average=\'micro\')\n        >>> print(results)\n        {\'recall\': 0.3333333333333333}\n        >>> results = recall_metric.compute(predictions=predictions, references=references, average=\'weighted\')\n        >>> print(results)\n        {\'recall\': 0.3333333333333333}\n        >>> results = recall_metric.compute(predictions=predictions, references=references, average=None)\n        >>> print(results)\n        {\'recall\': array([1., 0., 0.])}\n'
SCREAMING_SNAKE_CASE_ = '\n@article{scikit-learn, title={Scikit-learn: Machine Learning in {P}ython}, author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V. and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P. and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.}, journal={Journal of Machine Learning Research}, volume={12}, pages={2825--2830}, year={2011}\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class a ( datasets.Metric ):
    def _info( self ):
'''simple docstring'''
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"predictions": datasets.Sequence(datasets.Value("int32" ) ),
"references": datasets.Sequence(datasets.Value("int32" ) ),
}
if self.config_name == "multilabel"
else {
"predictions": datasets.Value("int32" ),
"references": datasets.Value("int32" ),
} ) , reference_urls=["https://scikit-learn.org/stable/modules/generated/sklearn.metrics.recall_score.html"] , )
    def _compute( self , predictions , references , labels=None , pos_label=1 , average="binary" , sample_weight=None , zero_division="warn" , ):
        '''simple docstring'''
        score = recall_score(
            references , predictions , labels=labels , pos_label=pos_label , average=average , sample_weight=sample_weight , zero_division=zero_division , )
        return {"recall": float(score ) if score.size == 1 else score}
| 366
|
from typing import Optional
import torch
import torch.utils.checkpoint
from torch import Tensor, nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
from ...activations import ACTaFN
from ...modeling_outputs import (
BackboneOutput,
BaseModelOutputWithNoAttention,
BaseModelOutputWithPoolingAndNoAttention,
ImageClassifierOutputWithNoAttention,
)
from ...modeling_utils import PreTrainedModel
from ...utils import (
add_code_sample_docstrings,
add_start_docstrings,
add_start_docstrings_to_model_forward,
logging,
replace_return_docstrings,
)
from ...utils.backbone_utils import BackboneMixin
from .configuration_resnet import ResNetConfig
SCREAMING_SNAKE_CASE_ = logging.get_logger(__name__)
# General docstring
SCREAMING_SNAKE_CASE_ = 'ResNetConfig'
# Base docstring
SCREAMING_SNAKE_CASE_ = 'microsoft/resnet-50'
SCREAMING_SNAKE_CASE_ = [1, 2048, 7, 7]
# Image classification docstring
SCREAMING_SNAKE_CASE_ = 'microsoft/resnet-50'
SCREAMING_SNAKE_CASE_ = 'tiger cat'
SCREAMING_SNAKE_CASE_ = [
'microsoft/resnet-50',
# See all resnet models at https://huggingface.co/models?filter=resnet
]
class ResNetConvLayer ( nn.Module ):
def __init__( self , A_ , A_ , A_ = 3 , A_ = 1 , A_ = "relu" ):
'''simple docstring'''
super().__init__()
_UpperCAmelCase : Union[str, Any] = nn.Convad(
A_ , A_ , kernel_size=A_ , stride=A_ , padding=kernel_size // 2 , bias=A_ )
_UpperCAmelCase : List[Any] = nn.BatchNormad(A_ )
_UpperCAmelCase : Union[str, Any] = ACTaFN[activation] if activation is not None else nn.Identity()
def _UpperCAmelCase ( self , A_ ):
'''simple docstring'''
_UpperCAmelCase : List[Any] = self.convolution(A_ )
_UpperCAmelCase : Optional[int] = self.normalization(A_ )
_UpperCAmelCase : Optional[Any] = self.activation(A_ )
return hidden_state
class ResNetEmbeddings ( nn.Module ):
def __init__( self , A_ ):
'''simple docstring'''
super().__init__()
_UpperCAmelCase : Any = ResNetConvLayer(
config.num_channels , config.embedding_size , kernel_size=7 , stride=2 , activation=config.hidden_act )
_UpperCAmelCase : List[str] = nn.MaxPoolad(kernel_size=3 , stride=2 , padding=1 )
_UpperCAmelCase : List[Any] = config.num_channels
def _UpperCAmelCase ( self , A_ ):
'''simple docstring'''
_UpperCAmelCase : int = pixel_values.shape[1]
if num_channels != self.num_channels:
raise ValueError(
"Make sure that the channel dimension of the pixel values match with the one set in the configuration." )
_UpperCAmelCase : int = self.embedder(A_ )
_UpperCAmelCase : int = self.pooler(A_ )
return embedding
class ResNetShortCut ( nn.Module ):
def __init__( self , A_ , A_ , A_ = 2 ):
'''simple docstring'''
super().__init__()
_UpperCAmelCase : Union[str, Any] = nn.Convad(A_ , A_ , kernel_size=1 , stride=A_ , bias=A_ )
_UpperCAmelCase : Optional[int] = nn.BatchNormad(A_ )
def _UpperCAmelCase ( self , A_ ):
'''simple docstring'''
_UpperCAmelCase : str = self.convolution(A_ )
_UpperCAmelCase : List[str] = self.normalization(A_ )
return hidden_state
class ResNetBasicLayer ( nn.Module ):
def __init__( self , A_ , A_ , A_ = 1 , A_ = "relu" ):
'''simple docstring'''
super().__init__()
_UpperCAmelCase : Optional[int] = in_channels != out_channels or stride != 1
_UpperCAmelCase : Dict = (
ResNetShortCut(A_ , A_ , stride=A_ ) if should_apply_shortcut else nn.Identity()
)
_UpperCAmelCase : int = nn.Sequential(
ResNetConvLayer(A_ , A_ , stride=A_ ) , ResNetConvLayer(A_ , A_ , activation=A_ ) , )
_UpperCAmelCase : Dict = ACTaFN[activation]
def _UpperCAmelCase ( self , A_ ):
'''simple docstring'''
_UpperCAmelCase : Optional[Any] = hidden_state
_UpperCAmelCase : Any = self.layer(A_ )
_UpperCAmelCase : Optional[int] = self.shortcut(A_ )
hidden_state += residual
_UpperCAmelCase : Optional[int] = self.activation(A_ )
return hidden_state
class ResNetBottleNeckLayer ( nn.Module ):
def __init__( self , A_ , A_ , A_ = 1 , A_ = "relu" , A_ = 4 ):
'''simple docstring'''
super().__init__()
_UpperCAmelCase : Optional[Any] = in_channels != out_channels or stride != 1
_UpperCAmelCase : Optional[int] = out_channels // reduction
_UpperCAmelCase : List[str] = (
ResNetShortCut(A_ , A_ , stride=A_ ) if should_apply_shortcut else nn.Identity()
)
_UpperCAmelCase : Dict = nn.Sequential(
ResNetConvLayer(A_ , A_ , kernel_size=1 ) , ResNetConvLayer(A_ , A_ , stride=A_ ) , ResNetConvLayer(A_ , A_ , kernel_size=1 , activation=A_ ) , )
_UpperCAmelCase : List[str] = ACTaFN[activation]
def _UpperCAmelCase ( self , A_ ):
'''simple docstring'''
_UpperCAmelCase : List[Any] = hidden_state
_UpperCAmelCase : List[str] = self.layer(A_ )
_UpperCAmelCase : List[str] = self.shortcut(A_ )
hidden_state += residual
_UpperCAmelCase : Dict = self.activation(A_ )
return hidden_state
class ResNetStage ( nn.Module ):
def __init__( self , A_ , A_ , A_ , A_ = 2 , A_ = 2 , ):
'''simple docstring'''
super().__init__()
_UpperCAmelCase : Any = ResNetBottleNeckLayer if config.layer_type == "bottleneck" else ResNetBasicLayer
_UpperCAmelCase : Optional[Any] = nn.Sequential(
# downsampling is done in the first layer with stride of 2
layer(A_ , A_ , stride=A_ , activation=config.hidden_act ) , *[layer(A_ , A_ , activation=config.hidden_act ) for _ in range(depth - 1 )] , )
def _UpperCAmelCase ( self , A_ ):
'''simple docstring'''
_UpperCAmelCase : List[Any] = input
for layer in self.layers:
_UpperCAmelCase : Optional[Any] = layer(A_ )
return hidden_state
class ResNetEncoder ( nn.Module ):
def __init__( self , A_ ):
'''simple docstring'''
super().__init__()
_UpperCAmelCase : Any = nn.ModuleList([] )
# based on `downsample_in_first_stage` the first layer of the first stage may or may not downsample the input
self.stages.append(
ResNetStage(
A_ , config.embedding_size , config.hidden_sizes[0] , stride=2 if config.downsample_in_first_stage else 1 , depth=config.depths[0] , ) )
_UpperCAmelCase : Union[str, Any] = zip(config.hidden_sizes , config.hidden_sizes[1:] )
for (in_channels, out_channels), depth in zip(A_ , config.depths[1:] ):
self.stages.append(ResNetStage(A_ , A_ , A_ , depth=A_ ) )
def _UpperCAmelCase ( self , A_ , A_ = False , A_ = True ):
'''simple docstring'''
_UpperCAmelCase : List[Any] = () if output_hidden_states else None
for stage_module in self.stages:
if output_hidden_states:
_UpperCAmelCase : Dict = hidden_states + (hidden_state,)
_UpperCAmelCase : str = stage_module(A_ )
if output_hidden_states:
_UpperCAmelCase : int = hidden_states + (hidden_state,)
if not return_dict:
return tuple(v for v in [hidden_state, hidden_states] if v is not None )
return BaseModelOutputWithNoAttention(
last_hidden_state=A_ , hidden_states=A_ , )
class ResNetPreTrainedModel ( PreTrainedModel ):
_lowercase = ResNetConfig
_lowercase = "resnet"
_lowercase = "pixel_values"
_lowercase = True
def _UpperCAmelCase ( self , A_ ):
'''simple docstring'''
if isinstance(A_ , nn.Convad ):
nn.init.kaiming_normal_(module.weight , mode="fan_out" , nonlinearity="relu" )
elif isinstance(A_ , (nn.BatchNormad, nn.GroupNorm) ):
nn.init.constant_(module.weight , 1 )
nn.init.constant_(module.bias , 0 )
def _UpperCAmelCase ( self , A_ , A_=False ):
'''simple docstring'''
if isinstance(A_ , A_ ):
_UpperCAmelCase : Optional[Any] = value
SCREAMING_SNAKE_CASE_ = R'\n This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it\n as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and\n behavior.\n\n Parameters:\n config ([`ResNetConfig`]): Model configuration class with all the parameters of the model.\n Initializing with a config file does not load the weights associated with the model, only the\n configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.\n'
SCREAMING_SNAKE_CASE_ = R'\n Args:\n pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):\n Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See\n [`ConvNextImageProcessor.__call__`] for details.\n\n output_hidden_states (`bool`, *optional*):\n Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for\n more detail.\n return_dict (`bool`, *optional*):\n Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.\n'
@add_start_docstrings(
"The bare ResNet model outputting raw features without any specific head on top." , UpperCAmelCase , )
class ResNetModel ( ResNetPreTrainedModel ):
def __init__( self , A_ ):
'''simple docstring'''
super().__init__(A_ )
_UpperCAmelCase : List[str] = config
_UpperCAmelCase : Any = ResNetEmbeddings(A_ )
_UpperCAmelCase : str = ResNetEncoder(A_ )
_UpperCAmelCase : Any = nn.AdaptiveAvgPoolad((1, 1) )
# Initialize weights and apply final processing
self.post_init()
@add_start_docstrings_to_model_forward(A_ )
@add_code_sample_docstrings(
checkpoint=_CHECKPOINT_FOR_DOC , output_type=A_ , config_class=_CONFIG_FOR_DOC , modality="vision" , expected_output=_EXPECTED_OUTPUT_SHAPE , )
def _UpperCAmelCase ( self , A_ , A_ = None , A_ = None ):
'''simple docstring'''
_UpperCAmelCase : Optional[int] = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
_UpperCAmelCase : List[str] = return_dict if return_dict is not None else self.config.use_return_dict
_UpperCAmelCase : List[Any] = self.embedder(A_ )
_UpperCAmelCase : str = self.encoder(
A_ , output_hidden_states=A_ , return_dict=A_ )
_UpperCAmelCase : List[Any] = encoder_outputs[0]
_UpperCAmelCase : int = self.pooler(A_ )
if not return_dict:
return (last_hidden_state, pooled_output) + encoder_outputs[1:]
return BaseModelOutputWithPoolingAndNoAttention(
last_hidden_state=A_ , pooler_output=A_ , hidden_states=encoder_outputs.hidden_states , )
@add_start_docstrings(
"\n ResNet Model with an image classification head on top (a linear layer on top of the pooled features), e.g. for\n ImageNet.\n " , UpperCAmelCase , )
class ResNetForImageClassification ( ResNetPreTrainedModel ):
def __init__( self , A_ ):
'''simple docstring'''
super().__init__(A_ )
_UpperCAmelCase : Optional[int] = config.num_labels
_UpperCAmelCase : str = ResNetModel(A_ )
# classification head
_UpperCAmelCase : int = nn.Sequential(
nn.Flatten() , nn.Linear(config.hidden_sizes[-1] , config.num_labels ) if config.num_labels > 0 else nn.Identity() , )
# initialize weights and apply final processing
self.post_init()
@add_start_docstrings_to_model_forward(A_ )
@add_code_sample_docstrings(
checkpoint=_IMAGE_CLASS_CHECKPOINT , output_type=A_ , config_class=_CONFIG_FOR_DOC , expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT , )
def _UpperCAmelCase ( self , A_ = None , A_ = None , A_ = None , A_ = None , ):
'''simple docstring'''
_UpperCAmelCase : Optional[int] = return_dict if return_dict is not None else self.config.use_return_dict
_UpperCAmelCase : Tuple = self.resnet(A_ , output_hidden_states=A_ , return_dict=A_ )
_UpperCAmelCase : Optional[int] = outputs.pooler_output if return_dict else outputs[1]
_UpperCAmelCase : int = self.classifier(A_ )
_UpperCAmelCase : Dict = None
if labels is not None:
if self.config.problem_type is None:
if self.num_labels == 1:
_UpperCAmelCase : Optional[Any] = "regression"
elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
_UpperCAmelCase : Optional[Any] = "single_label_classification"
else:
_UpperCAmelCase : Any = "multi_label_classification"
if self.config.problem_type == "regression":
_UpperCAmelCase : str = MSELoss()
if self.num_labels == 1:
_UpperCAmelCase : Any = loss_fct(logits.squeeze() , labels.squeeze() )
else:
_UpperCAmelCase : Optional[int] = loss_fct(A_ , A_ )
elif self.config.problem_type == "single_label_classification":
_UpperCAmelCase : Any = CrossEntropyLoss()
_UpperCAmelCase : Any = loss_fct(logits.view(-1 , self.num_labels ) , labels.view(-1 ) )
elif self.config.problem_type == "multi_label_classification":
_UpperCAmelCase : Any = BCEWithLogitsLoss()
_UpperCAmelCase : Tuple = loss_fct(A_ , A_ )
if not return_dict:
_UpperCAmelCase : Any = (logits,) + outputs[2:]
return (loss,) + output if loss is not None else output
return ImageClassifierOutputWithNoAttention(loss=A_ , logits=A_ , hidden_states=outputs.hidden_states )
@add_start_docstrings(
"\n ResNet backbone, to be used with frameworks like DETR and MaskFormer.\n " , UpperCAmelCase , )
class ResNetBackbone ( ResNetPreTrainedModel , BackboneMixin ):
def __init__( self , A_ ):
'''simple docstring'''
super().__init__(A_ )
super()._init_backbone(A_ )
_UpperCAmelCase : Optional[int] = [config.embedding_size] + config.hidden_sizes
_UpperCAmelCase : str = ResNetEmbeddings(A_ )
_UpperCAmelCase : List[Any] = ResNetEncoder(A_ )
# initialize weights and apply final processing
self.post_init()
@add_start_docstrings_to_model_forward(A_ )
@replace_return_docstrings(output_type=A_ , config_class=_CONFIG_FOR_DOC )
def _UpperCAmelCase ( self , A_ , A_ = None , A_ = None ):
'''simple docstring'''
_UpperCAmelCase : Tuple = return_dict if return_dict is not None else self.config.use_return_dict
_UpperCAmelCase : Tuple = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
_UpperCAmelCase : Tuple = self.embedder(A_ )
_UpperCAmelCase : Optional[int] = self.encoder(A_ , output_hidden_states=A_ , return_dict=A_ )
_UpperCAmelCase : Optional[int] = outputs.hidden_states
_UpperCAmelCase : Any = ()
for idx, stage in enumerate(self.stage_names ):
if stage in self.out_features:
feature_maps += (hidden_states[idx],)
if not return_dict:
_UpperCAmelCase : Union[str, Any] = (feature_maps,)
if output_hidden_states:
output += (outputs.hidden_states,)
return output
return BackboneOutput(
feature_maps=A_ , hidden_states=outputs.hidden_states if output_hidden_states else None , attentions=A_ , )
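# Usage sketch with the public transformers names this file implements (checkpoint
# from the docstring constants above; `image` is any PIL image you supply):
#
#   from transformers import AutoImageProcessor
#   processor = AutoImageProcessor.from_pretrained("microsoft/resnet-50")
#   model = ResNetForImageClassification.from_pretrained("microsoft/resnet-50")
#   inputs = processor(images=image, return_tensors="pt")
#   predicted = model(**inputs).logits.argmax(-1).item()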
| 189
| 0
|
import os
def solution():
    """
    Find the maximum total from top to bottom of the triangle stored in triangle.txt
    (a Project Euler-style maximum path sum).
    """
    script_dir = os.path.dirname(os.path.realpath(__file__))
    triangle_path = os.path.join(script_dir, "triangle.txt")
    with open(triangle_path) as f:
        triangle = f.readlines()
    a = []
    for line in triangle:
        numbers_from_line = []
        for number in line.strip().split(" "):
            numbers_from_line.append(int(number))
        a.append(numbers_from_line)
    for i in range(1, len(a)):
        for j in range(len(a[i])):
            number_a = a[i - 1][j] if j != len(a[i - 1]) else 0
            number_b = a[i - 1][j - 1] if j > 0 else 0
            a[i][j] += max(number_a, number_b)
    return max(a[-1])
if __name__ == "__main__":
print(solution())
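# Worked example of the dynamic programme above on a 3-row triangle:
#
#   3            a[0] = [3]
#   7 4          a[1] = [10, 7]           (each entry += best reachable parent)
#   2 4 6        a[2] = [12, 14, 13]
#
# max(a[-1]) == 14, i.e. the path 3 -> 7 -> 4.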
| 130
|
"""simple docstring"""
from ..utils import DummyObject, requires_backends
class a ( metaclass=DummyObject ):
UpperCamelCase : Optional[int] = ['torch', 'torchsde']
def __init__( self : Union[str, Any] , *lowerCAmelCase : Any , **lowerCAmelCase : Union[str, Any] ) -> Dict:
'''simple docstring'''
requires_backends(self , ["""torch""", """torchsde"""] )
@classmethod
def lowerCamelCase__ ( cls : Union[str, Any] , *lowerCAmelCase : Union[str, Any] , **lowerCAmelCase : Tuple ) -> List[Any]:
'''simple docstring'''
requires_backends(cls , ["""torch""", """torchsde"""] )
@classmethod
def lowerCamelCase__ ( cls : Union[str, Any] , *lowerCAmelCase : Optional[int] , **lowerCAmelCase : Any ) -> Tuple:
'''simple docstring'''
requires_backends(cls , ["""torch""", """torchsde"""] )
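# This dummy class makes top-level diffusers imports succeed even when torch and
# torchsde are missing; upstream it stands in for DPMSolverSDEScheduler (an assumption
# based on the backend pair, not stated in this snippet). Any instantiation or
# from_pretrained/from_config call then raises a clear ImportError via
# requires_backends telling the user which packages to install.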
| 173
| 0
|
from collections import OrderedDict
from typing import Any, List, Mapping, Optional
from ... import PreTrainedTokenizer, TensorType, is_torch_available
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfigWithPast, PatchingSpec
from ...utils import logging
__UpperCamelCase = logging.get_logger(__name__)
__UpperCamelCase = {
'''Salesforce/codegen-350M-nl''': '''https://huggingface.co/Salesforce/codegen-350M-nl/resolve/main/config.json''',
'''Salesforce/codegen-350M-multi''': '''https://huggingface.co/Salesforce/codegen-350M-multi/resolve/main/config.json''',
'''Salesforce/codegen-350M-mono''': '''https://huggingface.co/Salesforce/codegen-350M-mono/resolve/main/config.json''',
'''Salesforce/codegen-2B-nl''': '''https://huggingface.co/Salesforce/codegen-2B-nl/resolve/main/config.json''',
'''Salesforce/codegen-2B-multi''': '''https://huggingface.co/Salesforce/codegen-2B-multi/resolve/main/config.json''',
'''Salesforce/codegen-2B-mono''': '''https://huggingface.co/Salesforce/codegen-2B-mono/resolve/main/config.json''',
'''Salesforce/codegen-6B-nl''': '''https://huggingface.co/Salesforce/codegen-6B-nl/resolve/main/config.json''',
'''Salesforce/codegen-6B-multi''': '''https://huggingface.co/Salesforce/codegen-6B-multi/resolve/main/config.json''',
'''Salesforce/codegen-6B-mono''': '''https://huggingface.co/Salesforce/codegen-6B-mono/resolve/main/config.json''',
'''Salesforce/codegen-16B-nl''': '''https://huggingface.co/Salesforce/codegen-16B-nl/resolve/main/config.json''',
'''Salesforce/codegen-16B-multi''': '''https://huggingface.co/Salesforce/codegen-16B-multi/resolve/main/config.json''',
'''Salesforce/codegen-16B-mono''': '''https://huggingface.co/Salesforce/codegen-16B-mono/resolve/main/config.json''',
}
class CodeGenConfig ( PretrainedConfig ):
    model_type = "codegen"
    attribute_map = {
        "max_position_embeddings": "n_positions",
        "hidden_size": "n_embd",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }
    def __init__( self, vocab_size=5_0400, n_positions=2048, n_ctx=2048, n_embd=4096, n_layer=28, n_head=16, rotary_dim=64, n_inner=None, activation_function="gelu_new", resid_pdrop=0.0, embd_pdrop=0.0, attn_pdrop=0.0, layer_norm_epsilon=1e-5, initializer_range=0.02, use_cache=True, bos_token_id=5_0256, eos_token_id=5_0256, tie_word_embeddings=False, **kwargs, ) -> None:
        self.vocab_size = vocab_size
        self.n_ctx = n_ctx
        self.n_positions = n_positions
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        self.n_inner = n_inner
        self.rotary_dim = rotary_dim
        self.activation_function = activation_function
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.use_cache = use_cache
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
        super().__init__(
            bos_token_id=bos_token_id, eos_token_id=eos_token_id, tie_word_embeddings=tie_word_embeddings, **kwargs)
class CodeGenOnnxConfig ( OnnxConfigWithPast ):
    def __init__( self, config, task = "default", patching_specs = None, use_past = False, ) -> None:
        super().__init__(config, task=task, patching_specs=patching_specs, use_past=use_past)
        if not getattr(self._config, 'pad_token_id', None):
            # TODO: how to do that better?
            self._config.pad_token_id = 0
    @property
    def inputs( self) -> Mapping[str, Mapping[int, str]]:
        common_inputs = OrderedDict({'input_ids': {0: 'batch', 1: 'sequence'}})
        if self.use_past:
            self.fill_with_past_key_values_(common_inputs, direction='inputs')
            common_inputs['attention_mask'] = {0: 'batch', 1: 'past_sequence + sequence'}
        else:
            common_inputs['attention_mask'] = {0: 'batch', 1: 'sequence'}
        return common_inputs
    @property
    def num_layers( self) -> int:
        return self._config.n_layer
    @property
    def num_attention_heads( self) -> int:
        return self._config.n_head
    def generate_dummy_inputs( self, tokenizer, batch_size = -1, seq_length = -1, is_pair = False, framework = None, ) -> Mapping[str, Any]:
        common_inputs = super(OnnxConfigWithPast, self).generate_dummy_inputs(
            tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework)
        # We need to order the input in the way they appears in the forward()
        ordered_inputs = OrderedDict({'input_ids': common_inputs['input_ids']})
        # Need to add the past_keys
        if self.use_past:
            if not is_torch_available():
                raise ValueError('Cannot generate dummy past_keys inputs without PyTorch installed.')
            else:
                import torch
                batch, seqlen = common_inputs['input_ids'].shape
                # Not using the same length for past_key_values
                past_key_values_length = seqlen + 2
                past_shape = (
                    batch,
                    self.num_attention_heads,
                    past_key_values_length,
                    self._config.hidden_size // self.num_attention_heads,
                )
                ordered_inputs['past_key_values'] = [
                    (torch.zeros(past_shape), torch.zeros(past_shape)) for _ in range(self.num_layers)
                ]
        ordered_inputs['attention_mask'] = common_inputs['attention_mask']
        if self.use_past:
            mask_dtype = ordered_inputs['attention_mask'].dtype
            ordered_inputs['attention_mask'] = torch.cat(
                [ordered_inputs['attention_mask'], torch.ones(batch, past_key_values_length, dtype=mask_dtype)], dim=1)
        return ordered_inputs
    @property
    def default_onnx_opset( self) -> int:
        return 13
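# Sketch: generating dummy ONNX export inputs with the classes above (the tokenizer
# checkpoint name is illustrative; the past_key_values branch needs torch installed):
#
#   from transformers import AutoTokenizer
#   tokenizer = AutoTokenizer.from_pretrained("Salesforce/codegen-350M-mono")
#   onnx_config = CodeGenOnnxConfig(CodeGenConfig(), use_past=True)
#   dummy = onnx_config.generate_dummy_inputs(
#       tokenizer, batch_size=2, seq_length=8, framework=TensorType.PYTORCH
#   )
#   print(dummy.keys())   # input_ids, past_key_values, attention_mask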
| 357
|
"""simple docstring"""
import copy
import re
class UpperCamelCase :
SCREAMING_SNAKE_CASE_ = "hp"
SCREAMING_SNAKE_CASE_ = {}
SCREAMING_SNAKE_CASE_ = None
@classmethod
def a_ ( cls, lowerCAmelCase__, lowerCAmelCase__) -> Tuple:
snake_case_ = prefix
snake_case_ = defaults
cls.build_naming_info()
@staticmethod
def a_ ( lowerCAmelCase__, lowerCAmelCase__) -> Optional[Any]:
if len(lowerCAmelCase__) == 0:
return ""
snake_case_ = None
if any(char.isdigit() for char in word):
raise Exception(f'Parameters should not contain numbers: \'{word}\' contains a number')
if word in info["short_word"]:
return info["short_word"][word]
for prefix_len in range(1, len(lowerCAmelCase__) + 1):
snake_case_ = word[:prefix_len]
if prefix in info["reverse_short_word"]:
continue
else:
snake_case_ = prefix
break
if short_word is None:
# Paranoid fallback
def int_to_alphabetic(lowerCAmelCase__):
snake_case_ = ''
while integer != 0:
snake_case_ = chr(ord('A') + integer % 10) + s
integer //= 10
return s
snake_case_ = 0
while True:
snake_case_ = word + '#' + int_to_alphabetic(lowerCAmelCase__)
if sword in info["reverse_short_word"]:
continue
else:
snake_case_ = sword
break
snake_case_ = short_word
snake_case_ = word
return short_word
@staticmethod
def a_ ( lowerCAmelCase__, lowerCAmelCase__) -> Dict:
snake_case_ = param_name.split('_')
snake_case_ = [TrialShortNamer.shortname_for_word(lowerCAmelCase__, lowerCAmelCase__) for word in words]
# We try to create a separatorless short name, but if there is a collision we have to fallback
# to a separated short name
snake_case_ = ['', '_']
for separator in separators:
snake_case_ = separator.join(lowerCAmelCase__)
if shortname not in info["reverse_short_param"]:
snake_case_ = shortname
snake_case_ = param_name
return shortname
return param_name
@staticmethod
def a_ ( lowerCAmelCase__, lowerCAmelCase__) -> List[Any]:
snake_case_ = TrialShortNamer.shortname_for_key(lowerCAmelCase__, lowerCAmelCase__)
snake_case_ = short_name
snake_case_ = param_name
@classmethod
def a_ ( cls) -> List[str]:
if cls.NAMING_INFO is not None:
return
snake_case_ = {
'short_word': {},
'reverse_short_word': {},
'short_param': {},
'reverse_short_param': {},
}
snake_case_ = list(cls.DEFAULTS.keys())
for k in field_keys:
cls.add_new_param_name(lowerCAmelCase__, lowerCAmelCase__)
snake_case_ = info
@classmethod
def a_ ( cls, lowerCAmelCase__) -> List[Any]:
cls.build_naming_info()
assert cls.PREFIX is not None
snake_case_ = [copy.copy(cls.PREFIX)]
for k, v in params.items():
if k not in cls.DEFAULTS:
raise Exception(f'You should provide a default value for the param name {k} with value {v}')
if v == cls.DEFAULTS[k]:
# The default value is not added to the name
continue
snake_case_ = cls.NAMING_INFO['short_param'][k]
if isinstance(lowerCAmelCase__, lowerCAmelCase__):
snake_case_ = 1 if v else 0
snake_case_ = '' if isinstance(lowerCAmelCase__, (int, float)) else '-'
snake_case_ = f'{key}{sep}{v}'
name.append(lowerCAmelCase__)
return "_".join(lowerCAmelCase__)
@classmethod
def a_ ( cls, lowerCAmelCase__) -> Optional[Any]:
snake_case_ = repr[len(cls.PREFIX) + 1 :]
if repr == "":
snake_case_ = []
else:
snake_case_ = repr.split('_')
snake_case_ = {}
for value in values:
if "-" in value:
snake_case_ , snake_case_ = value.split('-')
else:
snake_case_ = re.sub('[0-9.]', '', lowerCAmelCase__)
snake_case_ = float(re.sub('[^0-9.]', '', lowerCAmelCase__))
snake_case_ = cls.NAMING_INFO['reverse_short_param'][p_k]
snake_case_ = p_v
for k in cls.DEFAULTS:
if k not in parameters:
snake_case_ = cls.DEFAULTS[k]
return parameters
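# Usage sketch. Upstream (transformers' TrialShortNamer) the methods above are named
# set_defaults / shortname / parse_repr; with those upstream names the round-trip
# looks like this (class and values are illustrative):
#
#   class RunNamer(TrialShortNamer):
#       PREFIX = "run"
#       DEFAULTS = {"learning_rate": 3e-5, "warmup_steps": 0}
#
#   name = RunNamer.shortname({"learning_rate": 1e-4, "warmup_steps": 0})  # "run_lr0.0001"
#   params = RunNamer.parse_repr(name)  # {"learning_rate": 0.0001, "warmup_steps": 0}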
| 312
| 0
|
"""simple docstring"""
from manim import *
class lowerCAmelCase_ ( _lowercase ):
'''simple docstring'''
def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> Optional[int]:
A = Rectangle(height=0.5 ,width=0.5 )
A = Rectangle(height=0.25 ,width=0.25 )
A = Rectangle(height=0.46 ,width=0.46 ).set_stroke(width=0 )
A = [mem.copy() for i in range(6 )]
A = [mem.copy() for i in range(6 )]
A = VGroup(*A_ ).arrange(A_ ,buff=0 )
A = VGroup(*A_ ).arrange(A_ ,buff=0 )
A = VGroup(A_ ,A_ ).arrange(A_ ,buff=0 )
A = Text('CPU' ,font_size=24 )
A = Group(A_ ,A_ ).arrange(A_ ,buff=0.5 ,aligned_edge=A_ )
cpu.move_to([-2.5, -0.5, 0] )
self.add(A_ )
A = [mem.copy() for i in range(4 )]
A = VGroup(*A_ ).arrange(A_ ,buff=0 )
A = Text('GPU' ,font_size=24 )
A = Group(A_ ,A_ ).arrange(A_ ,buff=0.5 ,aligned_edge=A_ )
gpu.move_to([-1, -1, 0] )
self.add(A_ )
A = [mem.copy() for i in range(6 )]
A = VGroup(*A_ ).arrange(A_ ,buff=0 )
A = Text('Model' ,font_size=24 )
A = Group(A_ ,A_ ).arrange(A_ ,buff=0.5 ,aligned_edge=A_ )
model.move_to([3, -1.0, 0] )
self.add(A_ )
A = []
A = []
A = []
for i, rect in enumerate(A_ ):
rect.set_stroke(A_ )
A = Rectangle(height=0.46 / 4 ,width=0.46 / 3 ).set_stroke(width=0.0 ).set_fill(A_ ,opacity=0.7 )
if i == 0:
cpu_target.next_to(cpu_left_col_base[0].get_corner(DOWN + LEFT ) ,buff=0.02 ,direction=A_ )
cpu_target.set_x(cpu_target.get_x() + 0.1 )
elif i == 3:
cpu_target.next_to(model_cpu_arr[0] ,direction=A_ ,buff=0.0 )
else:
cpu_target.next_to(model_cpu_arr[i - 1] ,direction=A_ ,buff=0.0 )
self.add(A_ )
model_cpu_arr.append(A_ )
self.add(*A_ ,*A_ ,*A_ )
A = [mem.copy() for i in range(6 )]
A = VGroup(*A_ ).arrange(A_ ,buff=0 )
A = Text('Loaded Checkpoint' ,font_size=24 )
A = Group(A_ ,A_ ).arrange(A_ ,buff=0.5 ,aligned_edge=A_ )
checkpoint.move_to([3, 0.5, 0] )
self.add(A_ )
A = []
A = []
for i, rect in enumerate(A_ ):
A = fill.copy().set_fill(A_ ,opacity=0.7 )
target.move_to(A_ )
ckpt_arr.append(A_ )
A = target.copy()
if i < 5:
cpu_target.move_to(cpu_left_col_base[i + 1] )
else:
cpu_target.move_to(cpu_right_col_base[i - 5] )
ckpt_cpu_arr.append(A_ )
self.add(*A_ ,*A_ )
A = Square(side_length=2.2 )
key.move_to([-5, 2, 0] )
A = MarkupText(
F'<b>Key:</b>\n\n<span fgcolor=\'{YELLOW}\'>●</span> Empty Model' ,font_size=18 ,)
key_text.move_to([-5, 2.4, 0] )
self.add(A_ ,A_ )
A = MarkupText(
F'<span fgcolor=\'{BLUE}\'>●</span> Checkpoint' ,font_size=18 ,)
blue_text.next_to(A_ ,DOWN * 2.4 ,aligned_edge=key_text.get_left() )
self.add(A_ )
A = MarkupText(
F'Based on the passed-in configuration, weights are stored in\na variety of np.memmaps on disk or on a particular device.' ,font_size=24 ,)
step_a.move_to([2, 2, 0] )
A = [meta_mem.copy() for i in range(6 )]
A = [meta_mem.copy() for i in range(6 )]
A = VGroup(*A_ ).arrange(A_ ,buff=0 )
A = VGroup(*A_ ).arrange(A_ ,buff=0 )
A = VGroup(A_ ,A_ ).arrange(A_ ,buff=0 )
A = Text('Disk' ,font_size=24 )
A = Group(A_ ,A_ ).arrange(A_ ,buff=0.5 ,aligned_edge=A_ )
disk.move_to([-4.0, -1.25, 0] )
self.play(Write(A_ ,run_time=3 ) ,Write(A_ ,run_time=1 ) ,Create(A_ ,run_time=1 ) )
A = []
for i, rect in enumerate(A_ ):
A = rect.copy()
target.generate_target()
target.target.move_to(disk_left_col_base[i] ).scale(0.5 )
animations.append(MoveToTarget(A_ ,run_time=1.5 ) )
self.play(*A_ )
self.play(FadeOut(A_ ) )
A = MarkupText(F'Then, the checkpoint is removed from memory\nthrough garbage collection.' ,font_size=24 )
step_a.move_to([2, 2, 0] )
self.play(Write(A_ ,run_time=3 ) )
self.play(
FadeOut(A_ ,A_ ,*A_ ,*A_ ) ,)
self.wait()
| 74
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
_lowercase = {'''configuration_deit''': ['''DEIT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''DeiTConfig''', '''DeiTOnnxConfig''']}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowercase = ['''DeiTFeatureExtractor''']
_lowercase = ['''DeiTImageProcessor''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowercase = [
'''DEIT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''DeiTForImageClassification''',
'''DeiTForImageClassificationWithTeacher''',
'''DeiTForMaskedImageModeling''',
'''DeiTModel''',
'''DeiTPreTrainedModel''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowercase = [
'''TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFDeiTForImageClassification''',
'''TFDeiTForImageClassificationWithTeacher''',
'''TFDeiTForMaskedImageModeling''',
'''TFDeiTModel''',
'''TFDeiTPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_deit import DEIT_PRETRAINED_CONFIG_ARCHIVE_MAP, DeiTConfig, DeiTOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_deit import DeiTFeatureExtractor
from .image_processing_deit import DeiTImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_deit import (
DEIT_PRETRAINED_MODEL_ARCHIVE_LIST,
DeiTForImageClassification,
DeiTForImageClassificationWithTeacher,
DeiTForMaskedImageModeling,
DeiTModel,
DeiTPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_deit import (
TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFDeiTForImageClassification,
TFDeiTForImageClassificationWithTeacher,
TFDeiTForMaskedImageModeling,
TFDeiTModel,
TFDeiTPreTrainedModel,
)
else:
import sys
_lowercase = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
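# Note (editor's addition): in the original module layout, _LazyModule defers
# the imports registered in _import_structure until an attribute is first
# accessed, so `from transformers.models.deit import DeiTModel` only pulls in
# the torch-backed code on demand; the try/except blocks above simply skip
# names whose optional backend (vision, torch or TF) is unavailable.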
| 74
| 1
|
"""simple docstring"""
lowerCamelCase_ = {0: [2, 3], 1: [0], 2: [1], 3: [4], 4: []}
lowerCamelCase_ = {0: [1, 2, 3], 1: [2], 2: [0], 3: [4], 4: [5], 5: [3]}
def snake_case ( A__ ,A__ ,A__ ):
UpperCAmelCase_ : Optional[Any] = True
UpperCAmelCase_ : Dict = []
for neighbour in graph[vert]:
if not visited[neighbour]:
order += topology_sort(__a ,__a ,__a )
order.append(__a )
return order
def snake_case ( A__ ,A__ ,A__ ):
UpperCAmelCase_ : List[str] = True
UpperCAmelCase_ : Tuple = [vert]
for neighbour in reversed_graph[vert]:
if not visited[neighbour]:
component += find_components(__a ,__a ,__a )
return component
def snake_case ( A__ ):
UpperCAmelCase_ : Dict = len(__a ) * [False]
UpperCAmelCase_ : dict[int, list[int]] = {vert: [] for vert in range(len(__a ) )}
for vert, neighbours in graph.items():
for neighbour in neighbours:
reversed_graph[neighbour].append(__a )
UpperCAmelCase_ : Union[str, Any] = []
for i, was_visited in enumerate(__a ):
if not was_visited:
order += topology_sort(__a ,__a ,__a )
UpperCAmelCase_ : Optional[Any] = []
UpperCAmelCase_ : Union[str, Any] = len(__a ) * [False]
for i in range(len(__a ) ):
UpperCAmelCase_ : List[str] = order[len(__a ) - i - 1]
if not visited[vert]:
UpperCAmelCase_ : List[str] = find_components(__a ,__a ,__a )
components_list.append(__a )
return components_list
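# Worked example (editor's addition): in the first test graph above,
# 0 -> 2 -> 1 -> 0 forms a cycle, so its strongly connected components are
# {0, 1, 2}, {3} and {4}; the second test graph condenses to the two
# components {0, 1, 2} and {3, 4, 5}.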
| 355
|
"""simple docstring"""
import numpy as np
def snake_case ( A__ ,A__ ,A__ ,A__ ,A__ ):
UpperCAmelCase_ : Tuple = int(np.ceil((x_end - xa) / h ) )
UpperCAmelCase_ : Optional[Any] = np.zeros((n + 1,) )
UpperCAmelCase_ : List[Any] = ya
UpperCAmelCase_ : Optional[int] = xa
for k in range(A__ ):
UpperCAmelCase_ : List[str] = f(A__ ,y[k] )  # k1
UpperCAmelCase_ : Any = f(x + 0.5 * h ,y[k] + 0.5 * h * ka )  # k2
UpperCAmelCase_ : Union[str, Any] = f(x + 0.5 * h ,y[k] + 0.5 * h * ka )  # k3
UpperCAmelCase_ : Dict = f(x + h ,y[k] + h * ka )  # k4
UpperCAmelCase_ : Optional[int] = y[k] + (1 / 6) * h * (ka + 2 * ka + 2 * ka + ka)  # classical RK4 update: y + h*(k1 + 2*k2 + 2*k3 + k4)/6
x += h
return y
if __name__ == "__main__":
import doctest
doctest.testmod()
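# Editor's sketch: one classical RK4 step with the four slopes written out,
# which the digit-mangled names above (every slope rendered as "ka") obscure.
def rk4_step(f, x, y, h):
    k1 = f(x, y)
    k2 = f(x + 0.5 * h, y + 0.5 * h * k1)
    k3 = f(x + 0.5 * h, y + 0.5 * h * k2)
    k4 = f(x + h, y + h * k3)
    return y + (h / 6.0) * (k1 + 2 * k2 + 2 * k3 + k4)
# rk4_step(lambda x, y: y, 0.0, 1.0, 0.1) -> 1.1051708..., close to exp(0.1)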
| 253
| 0
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
A = {
'''configuration_git''': ['''GIT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''GitConfig''', '''GitVisionConfig'''],
'''processing_git''': ['''GitProcessor'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A = [
'''GIT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''GitForCausalLM''',
'''GitModel''',
'''GitPreTrainedModel''',
'''GitVisionModel''',
]
if TYPE_CHECKING:
from .configuration_git import GIT_PRETRAINED_CONFIG_ARCHIVE_MAP, GitConfig, GitVisionConfig
from .processing_git import GitProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_git import (
GIT_PRETRAINED_MODEL_ARCHIVE_LIST,
GitForCausalLM,
GitModel,
GitPreTrainedModel,
GitVisionModel,
)
else:
import sys
A = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 160
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
A = {
'''configuration_git''': ['''GIT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''GitConfig''', '''GitVisionConfig'''],
'''processing_git''': ['''GitProcessor'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A = [
'''GIT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''GitForCausalLM''',
'''GitModel''',
'''GitPreTrainedModel''',
'''GitVisionModel''',
]
if TYPE_CHECKING:
from .configuration_git import GIT_PRETRAINED_CONFIG_ARCHIVE_MAP, GitConfig, GitVisionConfig
from .processing_git import GitProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_git import (
GIT_PRETRAINED_MODEL_ARCHIVE_LIST,
GitForCausalLM,
GitModel,
GitPreTrainedModel,
GitVisionModel,
)
else:
import sys
A = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 160
| 1
|
from __future__ import annotations
import math
def SCREAMING_SNAKE_CASE__ ( _UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase ) -> int:
'''simple docstring'''
if depth < 0:
raise ValueError('Depth cannot be less than 0' )
if len(_UpperCAmelCase ) == 0:
raise ValueError('Scores cannot be empty' )
if depth == height:
return scores[node_index]
if is_max:
return max(
minimax(depth + 1, node_index * 2, _UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase ), minimax(depth + 1, node_index * 2 + 1, _UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase ), )
return min(
minimax(depth + 1, node_index * 2, _UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase ), minimax(depth + 1, node_index * 2 + 1, _UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase ), )
def SCREAMING_SNAKE_CASE__ ( ) -> None:
'''simple docstring'''
lowerCAmelCase : List[str] = [90, 23, 6, 33, 21, 65, 123, 34_423]
lowerCAmelCase : int = math.log(len(_UpperCAmelCase ), 2 )
print('Optimal value : ', end='' )
print(minimax(0, 0, _UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
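# Worked example (editor's addition), assuming a maximizing root as in the
# original script: the depth-2 max layer yields (90, 33, 65, 34423), the min
# layer reduces these to (33, 65), and the root picks 65, so the script
# prints "Optimal value : 65".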
| 323
|
import math
def SCREAMING_SNAKE_CASE__ ( _UpperCAmelCase = 100 ) -> int:
'''simple docstring'''
lowerCAmelCase : Any = sum(i * i for i in range(1, n + 1 ) )
lowerCAmelCase : str = int(math.pow(sum(range(1, n + 1 ) ), 2 ) )
return square_of_sum - sum_of_squares
if __name__ == "__main__":
print(F'{solution() = }')
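# Worked example (editor's addition): for n = 10 the sum of squares is 385 and
# the square of the sum is 55**2 = 3025, so solution(10) returns 2640; the
# default n = 100 gives 25502500 - 338350 = 25164150.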
| 323
| 1
|
'''simple docstring'''
import inspect
import unittest
from transformers import ViTMSNConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ViTMSNForImageClassification, ViTMSNModel
from transformers.models.vit_msn.modeling_vit_msn import VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class a__:
'''simple docstring'''
def __init__( self , __lowerCAmelCase , __lowerCAmelCase=13 , __lowerCAmelCase=30 , __lowerCAmelCase=2 , __lowerCAmelCase=3 , __lowerCAmelCase=True , __lowerCAmelCase=True , __lowerCAmelCase=32 , __lowerCAmelCase=5 , __lowerCAmelCase=4 , __lowerCAmelCase=37 , __lowerCAmelCase="gelu" , __lowerCAmelCase=0.1 , __lowerCAmelCase=0.1 , __lowerCAmelCase=10 , __lowerCAmelCase=0.02 , __lowerCAmelCase=None , ):
"""simple docstring"""
lowerCAmelCase = parent
lowerCAmelCase = batch_size
lowerCAmelCase = image_size
lowerCAmelCase = patch_size
lowerCAmelCase = num_channels
lowerCAmelCase = is_training
lowerCAmelCase = use_labels
lowerCAmelCase = hidden_size
lowerCAmelCase = num_hidden_layers
lowerCAmelCase = num_attention_heads
lowerCAmelCase = intermediate_size
lowerCAmelCase = hidden_act
lowerCAmelCase = hidden_dropout_prob
lowerCAmelCase = attention_probs_dropout_prob
lowerCAmelCase = type_sequence_label_size
lowerCAmelCase = initializer_range
lowerCAmelCase = scope
# in ViT MSN, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
lowerCAmelCase = (image_size // patch_size) ** 2
lowerCAmelCase = num_patches + 1
def a_ ( self):
"""simple docstring"""
lowerCAmelCase = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
lowerCAmelCase = None
if self.use_labels:
lowerCAmelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size)
lowerCAmelCase = self.get_config()
return config, pixel_values, labels
def a_ ( self):
"""simple docstring"""
return ViTMSNConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , initializer_range=self.initializer_range , )
def a_ ( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase):
"""simple docstring"""
lowerCAmelCase = ViTMSNModel(config=__lowerCAmelCase)
model.to(__lowerCAmelCase)
model.eval()
lowerCAmelCase = model(__lowerCAmelCase)
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size))
def a_ ( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase):
"""simple docstring"""
lowerCAmelCase = self.type_sequence_label_size
lowerCAmelCase = ViTMSNForImageClassification(__lowerCAmelCase)
model.to(__lowerCAmelCase)
model.eval()
lowerCAmelCase = model(__lowerCAmelCase , labels=__lowerCAmelCase)
print("""Pixel and labels shape: {pixel_values.shape}, {labels.shape}""")
print("""Labels: {labels}""")
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size))
# test greyscale images
lowerCAmelCase = 1
lowerCAmelCase = ViTMSNForImageClassification(__lowerCAmelCase)
model.to(__lowerCAmelCase)
model.eval()
lowerCAmelCase = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
lowerCAmelCase = model(__lowerCAmelCase)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size))
def a_ ( self):
"""simple docstring"""
lowerCAmelCase = self.prepare_config_and_inputs()
lowerCAmelCase , lowerCAmelCase , lowerCAmelCase = config_and_inputs
lowerCAmelCase = {"""pixel_values""": pixel_values}
return config, inputs_dict
@require_torch
class a__( lowerCAmelCase__ , lowerCAmelCase__ , unittest.TestCase ):
'''simple docstring'''
UpperCAmelCase_ : List[Any] = (ViTMSNModel, ViTMSNForImageClassification) if is_torch_available() else ()
UpperCAmelCase_ : List[Any] = (
{'''feature-extraction''': ViTMSNModel, '''image-classification''': ViTMSNForImageClassification}
if is_torch_available()
else {}
)
UpperCAmelCase_ : Optional[Any] = False
UpperCAmelCase_ : List[str] = False
UpperCAmelCase_ : Union[str, Any] = False
UpperCAmelCase_ : Optional[int] = False
def a_ ( self):
"""simple docstring"""
lowerCAmelCase = ViTMSNModelTester(self)
lowerCAmelCase = ConfigTester(self , config_class=__lowerCAmelCase , has_text_modality=__lowerCAmelCase , hidden_size=37)
def a_ ( self):
"""simple docstring"""
self.config_tester.run_common_tests()
@unittest.skip(reason="""ViTMSN does not use inputs_embeds""")
def a_ ( self):
"""simple docstring"""
pass
def a_ ( self):
"""simple docstring"""
lowerCAmelCase , lowerCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCAmelCase = model_class(__lowerCAmelCase)
self.assertIsInstance(model.get_input_embeddings() , (nn.Module))
lowerCAmelCase = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(__lowerCAmelCase , nn.Linear))
def a_ ( self):
"""simple docstring"""
lowerCAmelCase , lowerCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCAmelCase = model_class(__lowerCAmelCase)
lowerCAmelCase = inspect.signature(model.forward)
# signature.parameters is an OrderedDict => so arg_names order is deterministic
lowerCAmelCase = [*signature.parameters.keys()]
lowerCAmelCase = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] , __lowerCAmelCase)
def a_ ( self):
"""simple docstring"""
lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__lowerCAmelCase)
def a_ ( self):
"""simple docstring"""
lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*__lowerCAmelCase)
@slow
def a_ ( self):
"""simple docstring"""
for model_name in VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowerCAmelCase = ViTMSNModel.from_pretrained(__lowerCAmelCase)
self.assertIsNotNone(__lowerCAmelCase)
def snake_case__ ( ) -> int:
'''simple docstring'''
lowerCAmelCase = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
return image
@require_torch
@require_vision
class a__( unittest.TestCase ):
'''simple docstring'''
@cached_property
def a_ ( self):
"""simple docstring"""
return ViTImageProcessor.from_pretrained("""facebook/vit-msn-small""") if is_vision_available() else None
@slow
def a_ ( self):
"""simple docstring"""
torch.manual_seed(2)
lowerCAmelCase = ViTMSNForImageClassification.from_pretrained("""facebook/vit-msn-small""").to(__lowerCAmelCase)
lowerCAmelCase = self.default_image_processor
lowerCAmelCase = prepare_img()
lowerCAmelCase = image_processor(images=__lowerCAmelCase , return_tensors="""pt""").to(__lowerCAmelCase)
# forward pass
with torch.no_grad():
lowerCAmelCase = model(**__lowerCAmelCase)
# verify the logits
lowerCAmelCase = torch.Size((1, 1000))
self.assertEqual(outputs.logits.shape , __lowerCAmelCase)
lowerCAmelCase = torch.tensor([-0.0803, -0.4454, -0.2375]).to(__lowerCAmelCase)
self.assertTrue(torch.allclose(outputs.logits[0, :3] , __lowerCAmelCase , atol=1E-4))
| 272
|
'''simple docstring'''
from ...utils import logging
from ..ta.modeling_tf_ta import TFTaEncoderModel, TFTaForConditionalGeneration, TFTaModel
from .configuration_mta import MTaConfig
__lowercase = logging.get_logger(__name__)
__lowercase = '''T5Config'''
class a__( lowerCAmelCase__ ):
'''simple docstring'''
UpperCAmelCase_ : List[str] = '''mt5'''
UpperCAmelCase_ : Tuple = MTaConfig
class a__( lowerCAmelCase__ ):
'''simple docstring'''
UpperCAmelCase_ : List[Any] = '''mt5'''
UpperCAmelCase_ : int = MTaConfig
class a__( lowerCAmelCase__ ):
'''simple docstring'''
UpperCAmelCase_ : Tuple = '''mt5'''
UpperCAmelCase_ : Union[str, Any] = MTaConfig
| 272
| 1
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
__UpperCAmelCase : Tuple = {"configuration_xglm": ["XGLM_PRETRAINED_CONFIG_ARCHIVE_MAP", "XGLMConfig"]}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCAmelCase : List[Any] = ["XGLMTokenizer"]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCAmelCase : List[Any] = ["XGLMTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCAmelCase : List[Any] = [
"XGLM_PRETRAINED_MODEL_ARCHIVE_LIST",
"XGLMForCausalLM",
"XGLMModel",
"XGLMPreTrainedModel",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCAmelCase : int = [
"FlaxXGLMForCausalLM",
"FlaxXGLMModel",
"FlaxXGLMPreTrainedModel",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCAmelCase : List[Any] = [
"TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFXGLMForCausalLM",
"TFXGLMModel",
"TFXGLMPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_xglm import XGLM_PRETRAINED_CONFIG_ARCHIVE_MAP, XGLMConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xglm import XGLMTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xglm_fast import XGLMTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xglm import XGLM_PRETRAINED_MODEL_ARCHIVE_LIST, XGLMForCausalLM, XGLMModel, XGLMPreTrainedModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_xglm import FlaxXGLMForCausalLM, FlaxXGLMModel, FlaxXGLMPreTrainedModel
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_xglm import (
TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXGLMForCausalLM,
TFXGLMModel,
TFXGLMPreTrainedModel,
)
else:
import sys
__UpperCAmelCase : Optional[Any] = _LazyModule(__name__, globals()["__file__"], _import_structure)
| 293
|
import unittest
from diffusers.models.unet_ad_blocks import * # noqa F403
from diffusers.utils import torch_device
from .test_unet_blocks_common import UNetBlockTesterMixin
class __snake_case ( __lowerCamelCase , unittest.TestCase ):
'''simple docstring'''
lowerCAmelCase__ = DownBlockaD # noqa F405
lowerCAmelCase__ = """down"""
def UpperCAmelCase__ ( self : Any ):
__snake_case: str = [-0.0232, -0.9869, 0.8054, -0.0637, -0.1688, -1.4264, 0.4470, -1.3394, 0.0904]
super().test_output(A )
class __snake_case ( __lowerCamelCase , unittest.TestCase ):
'''simple docstring'''
lowerCAmelCase__ = ResnetDownsampleBlockaD # noqa F405
lowerCAmelCase__ = """down"""
def UpperCAmelCase__ ( self : Optional[int] ):
__snake_case: Union[str, Any] = [0.0710, 0.2410, -0.7320, -1.0757, -1.1343, 0.3540, -0.0133, -0.2576, 0.0948]
super().test_output(A )
class __snake_case ( __lowerCamelCase , unittest.TestCase ):
'''simple docstring'''
lowerCAmelCase__ = AttnDownBlockaD # noqa F405
lowerCAmelCase__ = """down"""
def UpperCAmelCase__ ( self : Any ):
__snake_case: Union[str, Any] = [0.0636, 0.8964, -0.6234, -1.0131, 0.0844, 0.4935, 0.3437, 0.0911, -0.2957]
super().test_output(A )
class __snake_case ( __lowerCamelCase , unittest.TestCase ):
'''simple docstring'''
lowerCAmelCase__ = CrossAttnDownBlockaD # noqa F405
lowerCAmelCase__ = """down"""
def UpperCAmelCase__ ( self : List[str] ):
__snake_case , __snake_case: List[str] = super().prepare_init_args_and_inputs_for_common()
__snake_case: List[Any] = 32
return init_dict, inputs_dict
def UpperCAmelCase__ ( self : Optional[Any] ):
__snake_case: Optional[Any] = [0.2238, -0.7396, -0.2255, -0.3829, 0.1925, 1.1665, 0.0603, -0.7295, 0.1983]
super().test_output(A )
class __snake_case ( __lowerCamelCase , unittest.TestCase ):
'''simple docstring'''
lowerCAmelCase__ = SimpleCrossAttnDownBlockaD # noqa F405
lowerCAmelCase__ = """down"""
@property
def UpperCAmelCase__ ( self : Tuple ):
return super().get_dummy_input(include_encoder_hidden_states=A )
def UpperCAmelCase__ ( self : int ):
__snake_case , __snake_case: Union[str, Any] = super().prepare_init_args_and_inputs_for_common()
__snake_case: Optional[Any] = 32
return init_dict, inputs_dict
@unittest.skipIf(torch_device == """mps""" , """MPS result is not consistent""" )
def UpperCAmelCase__ ( self : List[Any] ):
__snake_case: Optional[Any] = [0.7921, -0.0992, -0.1962, -0.7695, -0.4242, 0.7804, 0.4737, 0.2765, 0.3338]
super().test_output(A )
class __snake_case ( __lowerCamelCase , unittest.TestCase ):
'''simple docstring'''
lowerCAmelCase__ = SkipDownBlockaD # noqa F405
lowerCAmelCase__ = """down"""
@property
def UpperCAmelCase__ ( self : Any ):
return super().get_dummy_input(include_skip_sample=A )
def UpperCAmelCase__ ( self : Any ):
__snake_case: Optional[Any] = [-0.0845, -0.2087, -0.2465, 0.0971, 0.1900, -0.0484, 0.2664, 0.4179, 0.5069]
super().test_output(A )
class __snake_case ( __lowerCamelCase , unittest.TestCase ):
'''simple docstring'''
lowerCAmelCase__ = AttnSkipDownBlockaD # noqa F405
lowerCAmelCase__ = """down"""
@property
def UpperCAmelCase__ ( self : List[Any] ):
return super().get_dummy_input(include_skip_sample=A )
def UpperCAmelCase__ ( self : int ):
__snake_case: str = [0.5539, 0.1609, 0.4924, 0.0537, -0.1995, 0.4050, 0.0979, -0.2721, -0.0642]
super().test_output(A )
class __snake_case ( __lowerCamelCase , unittest.TestCase ):
'''simple docstring'''
lowerCAmelCase__ = DownEncoderBlockaD # noqa F405
lowerCAmelCase__ = """down"""
@property
def UpperCAmelCase__ ( self : Union[str, Any] ):
return super().get_dummy_input(include_temb=A )
def UpperCAmelCase__ ( self : Optional[int] ):
__snake_case: str = {
"""in_channels""": 32,
"""out_channels""": 32,
}
__snake_case: Dict = self.dummy_input
return init_dict, inputs_dict
def UpperCAmelCase__ ( self : str ):
__snake_case: Optional[int] = [1.1102, 0.5302, 0.4872, -0.0023, -0.8042, 0.0483, -0.3489, -0.5632, 0.7626]
super().test_output(A )
class __snake_case ( __lowerCamelCase , unittest.TestCase ):
'''simple docstring'''
lowerCAmelCase__ = AttnDownEncoderBlockaD # noqa F405
lowerCAmelCase__ = """down"""
@property
def UpperCAmelCase__ ( self : List[str] ):
return super().get_dummy_input(include_temb=A )
def UpperCAmelCase__ ( self : Optional[int] ):
__snake_case: Optional[Any] = {
"""in_channels""": 32,
"""out_channels""": 32,
}
__snake_case: Tuple = self.dummy_input
return init_dict, inputs_dict
def UpperCAmelCase__ ( self : Optional[int] ):
__snake_case: Dict = [0.8966, -0.1486, 0.8568, 0.8141, -0.9046, -0.1342, -0.0972, -0.7417, 0.1538]
super().test_output(A )
class __snake_case ( __lowerCamelCase , unittest.TestCase ):
'''simple docstring'''
lowerCAmelCase__ = UNetMidBlockaD # noqa F405
lowerCAmelCase__ = """mid"""
def UpperCAmelCase__ ( self : str ):
__snake_case: Optional[int] = {
"""in_channels""": 32,
"""temb_channels""": 128,
}
__snake_case: List[str] = self.dummy_input
return init_dict, inputs_dict
def UpperCAmelCase__ ( self : str ):
__snake_case: Tuple = [-0.1062, 1.7248, 0.3494, 1.4569, -0.0910, -1.2421, -0.9984, 0.6736, 1.0028]
super().test_output(A )
class __snake_case ( __lowerCamelCase , unittest.TestCase ):
'''simple docstring'''
lowerCAmelCase__ = UNetMidBlockaDCrossAttn # noqa F405
lowerCAmelCase__ = """mid"""
def UpperCAmelCase__ ( self : str ):
__snake_case , __snake_case: int = super().prepare_init_args_and_inputs_for_common()
__snake_case: int = 32
return init_dict, inputs_dict
def UpperCAmelCase__ ( self : Dict ):
__snake_case: Optional[Any] = [0.0187, 2.4220, 0.4484, 1.1203, -0.6121, -1.5122, -0.8270, 0.7851, 1.8335]
super().test_output(A )
class __snake_case ( __lowerCamelCase , unittest.TestCase ):
'''simple docstring'''
lowerCAmelCase__ = UNetMidBlockaDSimpleCrossAttn # noqa F405
lowerCAmelCase__ = """mid"""
@property
def UpperCAmelCase__ ( self : Optional[int] ):
return super().get_dummy_input(include_encoder_hidden_states=A )
def UpperCAmelCase__ ( self : str ):
__snake_case , __snake_case: Any = super().prepare_init_args_and_inputs_for_common()
__snake_case: str = 32
return init_dict, inputs_dict
def UpperCAmelCase__ ( self : Dict ):
__snake_case: Optional[Any] = [0.7143, 1.9974, 0.5448, 1.3977, 0.1282, -1.1237, -1.4238, 0.5530, 0.8880]
super().test_output(A )
class __snake_case ( __lowerCamelCase , unittest.TestCase ):
'''simple docstring'''
lowerCAmelCase__ = UpBlockaD # noqa F405
lowerCAmelCase__ = """up"""
@property
def UpperCAmelCase__ ( self : Tuple ):
return super().get_dummy_input(include_res_hidden_states_tuple=A )
def UpperCAmelCase__ ( self : Tuple ):
__snake_case: Tuple = [-0.2041, -0.4165, -0.3022, 0.0041, -0.6628, -0.7053, 0.1928, -0.0325, 0.0523]
super().test_output(A )
class __snake_case ( __lowerCamelCase , unittest.TestCase ):
'''simple docstring'''
lowerCAmelCase__ = ResnetUpsampleBlockaD # noqa F405
lowerCAmelCase__ = """up"""
@property
def UpperCAmelCase__ ( self : Tuple ):
return super().get_dummy_input(include_res_hidden_states_tuple=A )
def UpperCAmelCase__ ( self : Union[str, Any] ):
__snake_case: int = [0.2287, 0.3549, -0.1346, 0.4797, -0.1715, -0.9649, 0.7305, -0.5864, -0.6244]
super().test_output(A )
class __snake_case ( __lowerCamelCase , unittest.TestCase ):
'''simple docstring'''
lowerCAmelCase__ = CrossAttnUpBlockaD # noqa F405
lowerCAmelCase__ = """up"""
@property
def UpperCAmelCase__ ( self : Optional[int] ):
return super().get_dummy_input(include_res_hidden_states_tuple=A )
def UpperCAmelCase__ ( self : Dict ):
__snake_case , __snake_case: Any = super().prepare_init_args_and_inputs_for_common()
__snake_case: Optional[int] = 32
return init_dict, inputs_dict
def UpperCAmelCase__ ( self : Union[str, Any] ):
__snake_case: List[Any] = [-0.1403, -0.3515, -0.0420, -0.1425, 0.3167, 0.5094, -0.2181, 0.5931, 0.5582]
super().test_output(A )
class __snake_case ( __lowerCamelCase , unittest.TestCase ):
'''simple docstring'''
lowerCAmelCase__ = SimpleCrossAttnUpBlockaD # noqa F405
lowerCAmelCase__ = """up"""
@property
def UpperCAmelCase__ ( self : Optional[Any] ):
return super().get_dummy_input(include_res_hidden_states_tuple=A , include_encoder_hidden_states=A )
def UpperCAmelCase__ ( self : Dict ):
__snake_case , __snake_case: Optional[Any] = super().prepare_init_args_and_inputs_for_common()
__snake_case: str = 32
return init_dict, inputs_dict
def UpperCAmelCase__ ( self : List[Any] ):
__snake_case: Union[str, Any] = [0.2645, 0.1480, 0.0909, 0.8044, -0.9758, -0.9083, 0.0994, -1.1453, -0.7402]
super().test_output(A )
class __snake_case ( __lowerCamelCase , unittest.TestCase ):
'''simple docstring'''
lowerCAmelCase__ = AttnUpBlockaD # noqa F405
lowerCAmelCase__ = """up"""
@property
def UpperCAmelCase__ ( self : int ):
return super().get_dummy_input(include_res_hidden_states_tuple=A )
@unittest.skipIf(torch_device == """mps""" , """MPS result is not consistent""" )
def UpperCAmelCase__ ( self : List[str] ):
__snake_case: Optional[Any] = [0.0979, 0.1326, 0.0021, 0.0659, 0.2249, 0.0059, 0.1132, 0.5952, 0.1033]
super().test_output(A )
class __snake_case ( __lowerCamelCase , unittest.TestCase ):
'''simple docstring'''
lowerCAmelCase__ = SkipUpBlockaD # noqa F405
lowerCAmelCase__ = """up"""
@property
def UpperCAmelCase__ ( self : str ):
return super().get_dummy_input(include_res_hidden_states_tuple=A )
def UpperCAmelCase__ ( self : Dict ):
__snake_case: Optional[int] = [-0.0893, -0.1234, -0.1506, -0.0332, 0.0123, -0.0211, 0.0566, 0.0143, 0.0362]
super().test_output(A )
class __snake_case ( __lowerCamelCase , unittest.TestCase ):
'''simple docstring'''
lowerCAmelCase__ = AttnSkipUpBlockaD # noqa F405
lowerCAmelCase__ = """up"""
@property
def UpperCAmelCase__ ( self : str ):
return super().get_dummy_input(include_res_hidden_states_tuple=A )
def UpperCAmelCase__ ( self : Optional[Any] ):
__snake_case: Optional[Any] = [0.0361, 0.0617, 0.2787, -0.0350, 0.0342, 0.3421, -0.0843, 0.0913, 0.3015]
super().test_output(A )
class __snake_case ( __lowerCamelCase , unittest.TestCase ):
'''simple docstring'''
lowerCAmelCase__ = UpDecoderBlockaD # noqa F405
lowerCAmelCase__ = """up"""
@property
def UpperCAmelCase__ ( self : Optional[int] ):
return super().get_dummy_input(include_temb=A )
def UpperCAmelCase__ ( self : str ):
__snake_case: Union[str, Any] = {"""in_channels""": 32, """out_channels""": 32}
__snake_case: Dict = self.dummy_input
return init_dict, inputs_dict
def UpperCAmelCase__ ( self : Any ):
__snake_case: Dict = [0.4404, 0.1998, -0.9886, -0.3320, -0.3128, -0.7034, -0.6955, -0.2338, -0.3137]
super().test_output(A )
class __snake_case ( __lowerCamelCase , unittest.TestCase ):
'''simple docstring'''
lowerCAmelCase__ = AttnUpDecoderBlockaD # noqa F405
lowerCAmelCase__ = """up"""
@property
def UpperCAmelCase__ ( self : Optional[Any] ):
return super().get_dummy_input(include_temb=A )
def UpperCAmelCase__ ( self : Optional[Any] ):
__snake_case: Optional[Any] = {"""in_channels""": 32, """out_channels""": 32}
__snake_case: Any = self.dummy_input
return init_dict, inputs_dict
def UpperCAmelCase__ ( self : int ):
__snake_case: Any = [0.6738, 0.4491, 0.1055, 1.0710, 0.7316, 0.3339, 0.3352, 0.1023, 0.3568]
super().test_output(A )
| 293
| 1
|
"""simple docstring"""
import glob
import os
import random
from string import ascii_lowercase, digits
import cva
import numpy as np
# Parameters
lowerCAmelCase__ = (720, 1280) # Height, Width
lowerCAmelCase__ = (0.4, 0.6) # if a box's height or width is below this scale, drop it
lowerCAmelCase__ = 1 / 100
lowerCAmelCase__ = ""
lowerCAmelCase__ = ""
lowerCAmelCase__ = ""
lowerCAmelCase__ = 250
def snake_case_ ( ):
'''simple docstring'''
_lowerCamelCase , _lowerCamelCase : Optional[Any] = get_dataset(A__, A__ )
for index in range(A__ ):
_lowerCamelCase : Optional[Any] = random.sample(range(len(A__ ) ), 4 )
_lowerCamelCase , _lowerCamelCase , _lowerCamelCase : Any = update_image_and_anno(
A__, A__, A__, A__, A__, filter_scale=A__, )
# Get random string code: '7b7ad245cdff75241935e4dd860f3bad'
_lowerCamelCase : str = random_chars(32 )
_lowerCamelCase : Union[str, Any] = path.split(os.sep )[-1].rsplit('''.''', 1 )[0]
_lowerCamelCase : Tuple = F'''{OUTPUT_DIR}/{file_name}_MOSAIC_{letter_code}'''
cva.imwrite(F'''{file_root}.jpg''', A__, [cva.IMWRITE_JPEG_QUALITY, 85] )
print(F'''Succeeded {index+1}/{NUMBER_IMAGES} with {file_name}''' )
_lowerCamelCase : Optional[Any] = []
for anno in new_annos:
_lowerCamelCase : Optional[int] = anno[3] - anno[1]
_lowerCamelCase : List[str] = anno[4] - anno[2]
_lowerCamelCase : int = anno[1] + width / 2
_lowerCamelCase : Dict = anno[2] + height / 2
_lowerCamelCase : List[Any] = F'''{anno[0]} {x_center} {y_center} {width} {height}'''
annos_list.append(A__ )
with open(F'''{file_root}.txt''', '''w''' ) as outfile:
outfile.write('''\n'''.join(line for line in annos_list ) )
def snake_case_ ( A_ : List[Any], A_ : List[Any] ):
'''simple docstring'''
_lowerCamelCase : str = []
_lowerCamelCase : Optional[Any] = []
for label_file in glob.glob(os.path.join(A__, '''*.txt''' ) ):
_lowerCamelCase : List[str] = label_file.split(os.sep )[-1].rsplit('''.''', 1 )[0]
with open(A__ ) as in_file:
_lowerCamelCase : List[str] = in_file.readlines()
_lowerCamelCase : Optional[Any] = os.path.join(A__, F'''{label_name}.jpg''' )
_lowerCamelCase : Tuple = []
for obj_list in obj_lists:
_lowerCamelCase : int = obj_list.rstrip('''\n''' ).split(''' ''' )
_lowerCamelCase : Optional[int] = float(obj[1] ) - float(obj[3] ) / 2
_lowerCamelCase : str = float(obj[2] ) - float(obj[4] ) / 2
_lowerCamelCase : Dict = float(obj[1] ) + float(obj[3] ) / 2
_lowerCamelCase : int = float(obj[2] ) + float(obj[4] ) / 2
boxes.append([int(obj[0] ), xmin, ymin, xmax, ymax] )
if not boxes:
continue
img_paths.append(A__ )
labels.append(A__ )
return img_paths, labels
def snake_case_ ( A_ : Optional[Any], A_ : Dict, A_ : Optional[int], A_ : Optional[Any], A_ : Tuple, A_ : int = 0.0, ):
'''simple docstring'''
_lowerCamelCase : Union[str, Any] = np.zeros([output_size[0], output_size[1], 3], dtype=np.uinta )
_lowerCamelCase : str = scale_range[0] + random.random() * (scale_range[1] - scale_range[0])
_lowerCamelCase : Dict = scale_range[0] + random.random() * (scale_range[1] - scale_range[0])
_lowerCamelCase : Dict = int(scale_x * output_size[1] )
_lowerCamelCase : Optional[int] = int(scale_y * output_size[0] )
_lowerCamelCase : str = []
_lowerCamelCase : List[str] = []
for i, index in enumerate(A__ ):
_lowerCamelCase : List[Any] = all_img_list[index]
path_list.append(A__ )
_lowerCamelCase : Any = all_annos[index]
_lowerCamelCase : str = cva.imread(A__ )
if i == 0: # top-left
_lowerCamelCase : Union[str, Any] = cva.resize(A__, (divid_point_x, divid_point_y) )
_lowerCamelCase : Any = img
for bbox in img_annos:
_lowerCamelCase : List[Any] = bbox[1] * scale_x
_lowerCamelCase : Optional[int] = bbox[2] * scale_y
_lowerCamelCase : Any = bbox[3] * scale_x
_lowerCamelCase : List[Any] = bbox[4] * scale_y
new_anno.append([bbox[0], xmin, ymin, xmax, ymax] )
elif i == 1: # top-right
_lowerCamelCase : Optional[int] = cva.resize(A__, (output_size[1] - divid_point_x, divid_point_y) )
_lowerCamelCase : Optional[Any] = img
for bbox in img_annos:
_lowerCamelCase : str = scale_x + bbox[1] * (1 - scale_x)
_lowerCamelCase : Union[str, Any] = bbox[2] * scale_y
_lowerCamelCase : List[Any] = scale_x + bbox[3] * (1 - scale_x)
_lowerCamelCase : Dict = bbox[4] * scale_y
new_anno.append([bbox[0], xmin, ymin, xmax, ymax] )
elif i == 2: # bottom-left
_lowerCamelCase : str = cva.resize(A__, (divid_point_x, output_size[0] - divid_point_y) )
_lowerCamelCase : Optional[Any] = img
for bbox in img_annos:
_lowerCamelCase : Dict = bbox[1] * scale_x
_lowerCamelCase : List[Any] = scale_y + bbox[2] * (1 - scale_y)
_lowerCamelCase : Optional[int] = bbox[3] * scale_x
_lowerCamelCase : Dict = scale_y + bbox[4] * (1 - scale_y)
new_anno.append([bbox[0], xmin, ymin, xmax, ymax] )
else: # bottom-right
_lowerCamelCase : Tuple = cva.resize(
A__, (output_size[1] - divid_point_x, output_size[0] - divid_point_y) )
_lowerCamelCase : Tuple = img
for bbox in img_annos:
_lowerCamelCase : Any = scale_x + bbox[1] * (1 - scale_x)
_lowerCamelCase : Optional[Any] = scale_y + bbox[2] * (1 - scale_y)
_lowerCamelCase : List[str] = scale_x + bbox[3] * (1 - scale_x)
_lowerCamelCase : Any = scale_y + bbox[4] * (1 - scale_y)
new_anno.append([bbox[0], xmin, ymin, xmax, ymax] )
# Remove bounding boxes smaller than the filter scale
if filter_scale > 0:
_lowerCamelCase : Dict = [
anno
for anno in new_anno
if filter_scale < (anno[3] - anno[1]) and filter_scale < (anno[4] - anno[2])
]
return output_img, new_anno, path_list[0]
def snake_case_ ( A_ : Optional[Any] ):
'''simple docstring'''
assert number_char > 1, "The number of characters should be greater than 1"
_lowerCamelCase : List[Any] = ascii_lowercase + digits
return "".join(random.choice(A__ ) for _ in range(A__ ) )
if __name__ == "__main__":
main()
print('''DONE ✅''')
| 72
|
'''simple docstring'''
from PIL import Image
def __lowerCamelCase ( A__ , A__ ) -> Image:
"""simple docstring"""
def brightness(c) -> float:
return 128 + level + (c - 128)  # a per-channel shift: equivalent to c + level
if not -255.0 <= level <= 255.0:
raise ValueError('level must be between -255.0 (black) and 255.0 (white)' )
return img.point(A__ )
if __name__ == "__main__":
# Load image
with Image.open("image_data/lena.jpg") as img:
# Change brightness to 100
_lowerCamelCase : List[str] = change_brightness(img, 100)
bright_img.save("image_data/lena_brightness.png", format="png")
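# Note (editor's addition): Image.point() applies brightness() to every channel
# value, so with level=100 a mid-gray value of 128 maps to 228 and the whole
# image is lifted uniformly.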
| 28
| 0
|
'''simple docstring'''
from typing import Callable, List, Optional, Tuple, Union
import torch
from transformers import CLIPTextModel, CLIPTokenizer
from ...configuration_utils import ConfigMixin, register_to_config
from ...models import ModelMixin, TransformeraDModel, VQModel
from ...schedulers import VQDiffusionScheduler
from ...utils import logging
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
__UpperCAmelCase =logging.get_logger(__name__) # pylint: disable=invalid-name
class a__ ( UpperCAmelCase__ , UpperCAmelCase__ ):
@register_to_config
def __init__( self : Optional[Any] , a : bool , a : Optional[int] = None , a : Optional[int] = None ):
"""simple docstring"""
super().__init__()
__lowerCamelCase = learnable
if self.learnable:
assert hidden_size is not None, "learnable=True requires `hidden_size` to be set"
assert length is not None, "learnable=True requires `length` to be set"
__lowerCamelCase = torch.zeros(a , a )
else:
__lowerCamelCase = None
__lowerCamelCase = torch.nn.Parameter(a )
class a__ ( UpperCAmelCase__ ):
lowerCamelCase : VQModel
lowerCamelCase : CLIPTextModel
lowerCamelCase : CLIPTokenizer
lowerCamelCase : TransformeraDModel
lowerCamelCase : LearnedClassifierFreeSamplingEmbeddings
lowerCamelCase : VQDiffusionScheduler
def __init__( self : List[str] , a : VQModel , a : CLIPTextModel , a : CLIPTokenizer , a : TransformeraDModel , a : VQDiffusionScheduler , a : LearnedClassifierFreeSamplingEmbeddings , ):
"""simple docstring"""
super().__init__()
self.register_modules(
vqvae=a , transformer=a , text_encoder=a , tokenizer=a , scheduler=a , learned_classifier_free_sampling_embeddings=a , )
def SCREAMING_SNAKE_CASE__ ( self : Any , a : List[str] , a : Union[str, Any] , a : List[str] ):
"""simple docstring"""
__lowerCamelCase = len(a ) if isinstance(a , a ) else 1
# get prompt text embeddings
__lowerCamelCase = self.tokenizer(
a , padding='''max_length''' , max_length=self.tokenizer.model_max_length , return_tensors='''pt''' , )
__lowerCamelCase = text_inputs.input_ids
if text_input_ids.shape[-1] > self.tokenizer.model_max_length:
__lowerCamelCase = self.tokenizer.batch_decode(text_input_ids[:, self.tokenizer.model_max_length :] )
logger.warning(
'''The following part of your input was truncated because CLIP can only handle sequences up to'''
f""" {self.tokenizer.model_max_length} tokens: {removed_text}""" )
__lowerCamelCase = text_input_ids[:, : self.tokenizer.model_max_length]
__lowerCamelCase = self.text_encoder(text_input_ids.to(self.device ) )[0]
# NOTE: This additional step of normalizing the text embeddings is from VQ-Diffusion.
# While CLIP does normalize the pooled output of the text transformer when combining
# the image and text embeddings, CLIP does not directly normalize the last hidden state.
#
# CLIP normalizing the pooled output.
# https://github.com/huggingface/transformers/blob/d92e22d1f28324f513f3080e5c47c071a3916721/src/transformers/models/clip/modeling_clip.py#L1052-L1053
__lowerCamelCase = prompt_embeds / prompt_embeds.norm(dim=-1 , keepdim=a )
# duplicate text embeddings for each generation per prompt
__lowerCamelCase = prompt_embeds.repeat_interleave(a , dim=0 )
if do_classifier_free_guidance:
if self.learned_classifier_free_sampling_embeddings.learnable:
__lowerCamelCase = self.learned_classifier_free_sampling_embeddings.embeddings
__lowerCamelCase = negative_prompt_embeds.unsqueeze(0 ).repeat(a , 1 , 1 )
else:
__lowerCamelCase = [''''''] * batch_size
__lowerCamelCase = text_input_ids.shape[-1]
__lowerCamelCase = self.tokenizer(
a , padding='''max_length''' , max_length=a , truncation=a , return_tensors='''pt''' , )
__lowerCamelCase = self.text_encoder(uncond_input.input_ids.to(self.device ) )[0]
# See comment for normalizing text embeddings
__lowerCamelCase = negative_prompt_embeds / negative_prompt_embeds.norm(dim=-1 , keepdim=a )
# duplicate unconditional embeddings for each generation per prompt, using mps friendly method
__lowerCamelCase = negative_prompt_embeds.shape[1]
__lowerCamelCase = negative_prompt_embeds.repeat(1 , a , 1 )
__lowerCamelCase = negative_prompt_embeds.view(batch_size * num_images_per_prompt , a , -1 )
# For classifier free guidance, we need to do two forward passes.
# Here we concatenate the unconditional and text embeddings into a single batch
# to avoid doing two forward passes
__lowerCamelCase = torch.cat([negative_prompt_embeds, prompt_embeds] )
return prompt_embeds
@torch.no_grad()
def __call__( self : Optional[int] , a : Union[str, List[str]] , a : int = 1_00 , a : float = 5.0 , a : float = 1.0 , a : int = 1 , a : Optional[Union[torch.Generator, List[torch.Generator]]] = None , a : Optional[torch.FloatTensor] = None , a : Optional[str] = "pil" , a : bool = True , a : Optional[Callable[[int, int, torch.FloatTensor], None]] = None , a : int = 1 , ):
"""simple docstring"""
if isinstance(a , a ):
__lowerCamelCase = 1
elif isinstance(a , a ):
__lowerCamelCase = len(a )
else:
raise ValueError(f"""`prompt` has to be of type `str` or `list` but is {type(a )}""" )
__lowerCamelCase = batch_size * num_images_per_prompt
__lowerCamelCase = guidance_scale > 1.0
__lowerCamelCase = self._encode_prompt(a , a , a )
if (callback_steps is None) or (
callback_steps is not None and (not isinstance(a , a ) or callback_steps <= 0)
):
raise ValueError(
f"""`callback_steps` has to be a positive integer but is {callback_steps} of type"""
f""" {type(a )}.""" )
# get the initial completely masked latents unless the user supplied it
__lowerCamelCase = (batch_size, self.transformer.num_latent_pixels)
if latents is None:
__lowerCamelCase = self.transformer.num_vector_embeds - 1
__lowerCamelCase = torch.full(a , a ).to(self.device )
else:
if latents.shape != latents_shape:
raise ValueError(f"""Unexpected latents shape, got {latents.shape}, expected {latents_shape}""" )
if (latents < 0).any() or (latents >= self.transformer.num_vector_embeds).any():
raise ValueError(
'''Unexpected latents value(s). All latents must be valid embedding indices, i.e. in the range 0,'''
f""" {self.transformer.num_vector_embeds - 1} (inclusive).""" )
__lowerCamelCase = latents.to(self.device )
# set timesteps
self.scheduler.set_timesteps(a , device=self.device )
__lowerCamelCase = self.scheduler.timesteps.to(self.device )
__lowerCamelCase = latents
for i, t in enumerate(self.progress_bar(a ) ):
# expand the sample if we are doing classifier free guidance
__lowerCamelCase = torch.cat([sample] * 2 ) if do_classifier_free_guidance else sample
# predict the un-noised image
# model_output == `log_p_x_0`
__lowerCamelCase = self.transformer(a , encoder_hidden_states=a , timestep=a ).sample
if do_classifier_free_guidance:
__lowerCamelCase , __lowerCamelCase = model_output.chunk(2 )
__lowerCamelCase = model_output_uncond + guidance_scale * (model_output_text - model_output_uncond)
model_output -= torch.logsumexp(a , dim=1 , keepdim=a )
__lowerCamelCase = self.truncate(a , a )
# remove `log(0)`'s (`-inf`s)
__lowerCamelCase = model_output.clamp(-70 )
# compute the previous noisy sample x_t -> x_t-1
__lowerCamelCase = self.scheduler.step(a , timestep=a , sample=a , generator=a ).prev_sample
# call the callback, if provided
if callback is not None and i % callback_steps == 0:
callback(a , a , a )
__lowerCamelCase = self.vqvae.config.vq_embed_dim
__lowerCamelCase = (batch_size, self.transformer.height, self.transformer.width, embedding_channels)
__lowerCamelCase = self.vqvae.quantize.get_codebook_entry(a , shape=a )
__lowerCamelCase = self.vqvae.decode(a , force_not_quantize=a ).sample
__lowerCamelCase = (image / 2 + 0.5).clamp(0 , 1 )
__lowerCamelCase = image.cpu().permute(0 , 2 , 3 , 1 ).numpy()
if output_type == "pil":
__lowerCamelCase = self.numpy_to_pil(a )
if not return_dict:
return (image,)
return ImagePipelineOutput(images=a )
def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] , a : torch.FloatTensor , a : float ):
"""simple docstring"""
__lowerCamelCase , __lowerCamelCase = torch.sort(a , 1 , descending=a )
__lowerCamelCase = torch.exp(a )
__lowerCamelCase = sorted_p_x_0.cumsum(dim=1 ) < truncation_rate
# Ensure that at least the largest probability is not zeroed out
__lowerCamelCase = torch.full_like(keep_mask[:, 0:1, :] , a )
__lowerCamelCase = torch.cat((all_true, keep_mask) , dim=1 )
__lowerCamelCase = keep_mask[:, :-1, :]
__lowerCamelCase = keep_mask.gather(1 , indices.argsort(1 ) )
__lowerCamelCase = log_p_x_0.clone()
__lowerCamelCase = -torch.inf # -inf = log(0)
return rv
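# Worked example (editor's addition) of the truncation above: for per-pixel
# probabilities (0.5, 0.3, 0.2) and truncation_rate=0.7, the sorted cumulative
# sums are (0.5, 0.8, 1.0); after prepending the always-true column and
# dropping the last one, the keep mask is (True, True, False), so only the
# 0.2 class has its log-probability set to -inf.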
| 237
|
'''simple docstring'''
import time
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch, torch_device
from ..test_modeling_common import ids_tensor
if is_torch_available():
import torch
from transformers.generation import (
MaxLengthCriteria,
MaxNewTokensCriteria,
MaxTimeCriteria,
StoppingCriteriaList,
validate_stopping_criteria,
)
@require_torch
class a__ ( unittest.TestCase ):
def SCREAMING_SNAKE_CASE__ ( self : Optional[int] , a : int ):
"""simple docstring"""
__lowerCamelCase = 3
__lowerCamelCase = 2_50
__lowerCamelCase = ids_tensor((batch_size, length) , a )
__lowerCamelCase = torch.ones((batch_size, length) , device=a , dtype=torch.float ) / length
return input_ids, scores
def SCREAMING_SNAKE_CASE__ ( self : List[Any] ):
"""simple docstring"""
__lowerCamelCase , __lowerCamelCase = self._get_tensors(5 )
__lowerCamelCase = StoppingCriteriaList(
[
MaxLengthCriteria(max_length=10 ),
MaxTimeCriteria(max_time=0.1 ),
] )
self.assertFalse(criteria(a , a ) )
__lowerCamelCase , __lowerCamelCase = self._get_tensors(9 )
self.assertFalse(criteria(a , a ) )
__lowerCamelCase , __lowerCamelCase = self._get_tensors(10 )
self.assertTrue(criteria(a , a ) )
def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] ):
"""simple docstring"""
__lowerCamelCase = MaxLengthCriteria(max_length=10 )
__lowerCamelCase , __lowerCamelCase = self._get_tensors(5 )
self.assertFalse(criteria(a , a ) )
__lowerCamelCase , __lowerCamelCase = self._get_tensors(9 )
self.assertFalse(criteria(a , a ) )
__lowerCamelCase , __lowerCamelCase = self._get_tensors(10 )
self.assertTrue(criteria(a , a ) )
def SCREAMING_SNAKE_CASE__ ( self : List[Any] ):
"""simple docstring"""
__lowerCamelCase = MaxNewTokensCriteria(start_length=5 , max_new_tokens=5 )
__lowerCamelCase , __lowerCamelCase = self._get_tensors(5 )
self.assertFalse(criteria(a , a ) )
__lowerCamelCase , __lowerCamelCase = self._get_tensors(9 )
self.assertFalse(criteria(a , a ) )
__lowerCamelCase , __lowerCamelCase = self._get_tensors(10 )
self.assertTrue(criteria(a , a ) )
__lowerCamelCase = StoppingCriteriaList([criteria] )
self.assertEqual(criteria_list.max_length , 10 )
def SCREAMING_SNAKE_CASE__ ( self : int ):
"""simple docstring"""
__lowerCamelCase , __lowerCamelCase = self._get_tensors(5 )
__lowerCamelCase = MaxTimeCriteria(max_time=0.1 )
self.assertFalse(criteria(a , a ) )
__lowerCamelCase = MaxTimeCriteria(max_time=0.1 , initial_timestamp=time.time() - 0.2 )
self.assertTrue(criteria(a , a ) )
def SCREAMING_SNAKE_CASE__ ( self : List[Any] ):
"""simple docstring"""
validate_stopping_criteria(StoppingCriteriaList([MaxLengthCriteria(10 )] ) , 10 )
with self.assertWarns(a ):
validate_stopping_criteria(StoppingCriteriaList([MaxLengthCriteria(10 )] ) , 11 )
__lowerCamelCase = validate_stopping_criteria(StoppingCriteriaList() , 11 )
self.assertEqual(len(a ) , 1 )
| 237
| 1
|
'''simple docstring'''
import argparse
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.local_sgd import LocalSGD
########################################################################
# This is a fully working simple example to use Accelerate
# with LocalSGD, which is a method to synchronize model
# parameters every K batches. It is different, but complementary
# to gradient accumulation.
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
lowercase : Union[str, Any] = 16
lowercase : Union[str, Any] = 32
def lowerCAmelCase_ ( snake_case__ , snake_case__ = 16 ):
'''simple docstring'''
A : str = AutoTokenizer.from_pretrained('''bert-base-cased''' )
A : Optional[Any] = load_dataset('''glue''' , '''mrpc''' )
def tokenize_function(snake_case__ ):
# max_length=None => use the model max length (it's actually the default)
A : Optional[Any] = tokenizer(examples['''sentence1'''] , examples['''sentence2'''] , truncation=snake_case__ , max_length=snake_case__ )
return outputs
# Apply the method we just defined to all the examples in all the splits of the dataset
# starting with the main process first:
with accelerator.main_process_first():
A : int = datasets.map(
snake_case__ , batched=snake_case__ , remove_columns=['''idx''', '''sentence1''', '''sentence2'''] , )
# We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
# transformers library
A : Union[str, Any] = tokenized_datasets.rename_column('''label''' , '''labels''' )
def collate_fn(snake_case__ ):
# On TPU it's best to pad everything to the same length or training will be very slow.
A : Tuple = 128 if accelerator.distributed_type == DistributedType.TPU else None
# When using mixed precision we want round multiples of 8/16
if accelerator.mixed_precision == "fp8":
A : int = 16
elif accelerator.mixed_precision != "no":
A : Tuple = 8
else:
A : Optional[int] = None
return tokenizer.pad(
snake_case__ , padding='''longest''' , max_length=snake_case__ , pad_to_multiple_of=snake_case__ , return_tensors='''pt''' , )
# Instantiate dataloaders.
A : int = DataLoader(
tokenized_datasets['''train'''] , shuffle=snake_case__ , collate_fn=snake_case__ , batch_size=snake_case__ )
A : Optional[int] = DataLoader(
tokenized_datasets['''validation'''] , shuffle=snake_case__ , collate_fn=snake_case__ , batch_size=snake_case__ )
return train_dataloader, eval_dataloader
# For testing only
if os.environ.get('TESTING_MOCKED_DATALOADERS', None) == "1":
from accelerate.test_utils.training import mocked_dataloaders
lowercase : List[Any] = mocked_dataloaders # noqa: F811
def training_function(config, args):
    # For testing only
    if os.environ.get("TESTING_MOCKED_DATALOADERS", None) == "1":
        config["num_epochs"] = 2
    # New Code #
    gradient_accumulation_steps = int(args.gradient_accumulation_steps)
    local_sgd_steps = int(args.local_sgd_steps)
    # Initialize accelerator
    accelerator = Accelerator(
        cpu=args.cpu, mixed_precision=args.mixed_precision, gradient_accumulation_steps=gradient_accumulation_steps
    )
    if accelerator.distributed_type not in [DistributedType.NO, DistributedType.MULTI_CPU, DistributedType.MULTI_GPU]:
        raise NotImplementedError("LocalSGD is supported only for CPUs and GPUs (no DeepSpeed or MegatronLM)")
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["lr"]
    num_epochs = int(config["num_epochs"])
    seed = int(config["seed"])
    batch_size = int(config["batch_size"])
    metric = evaluate.load("glue", "mrpc")

    set_seed(seed)
    train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size)

    # Instantiate the model (we build the model here so that the seed also control new weights initialization)
    model = AutoModelForSequenceClassification.from_pretrained("bert-base-cased", return_dict=True)

    # We could avoid this line since the accelerator is set with `device_placement=True` (default value).
    # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
    # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
    model = model.to(accelerator.device)

    # Instantiate optimizer
    optimizer = AdamW(params=model.parameters(), lr=lr)

    # Instantiate scheduler
    lr_scheduler = get_linear_schedule_with_warmup(
        optimizer=optimizer,
        num_warmup_steps=100,
        num_training_steps=(len(train_dataloader) * num_epochs),
    )

    # Prepare everything
    # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
    # prepare method.
    model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler
    )

    # Now we train the model
    for epoch in range(num_epochs):
        model.train()
        with LocalSGD(
            accelerator=accelerator, model=model, local_sgd_steps=local_sgd_steps, enabled=local_sgd_steps is not None
        ) as local_sgd:
            for step, batch in enumerate(train_dataloader):
                # We could avoid this line since we set the accelerator with `device_placement=True`.
                batch.to(accelerator.device)
                # New code #
                # We use the new `accumulate` context manager to perform gradient accumulation
                # We also currently do not support TPUs nor advise it as bugs were found on the XLA side when running our tests.
                with accelerator.accumulate(model):
                    output = model(**batch)
                    loss = output.loss
                    accelerator.backward(loss)
                    optimizer.step()
                    lr_scheduler.step()
                    optimizer.zero_grad()
                    # LocalSGD-specific line
                    local_sgd.step()

        model.eval()
        for step, batch in enumerate(eval_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device)
            with torch.no_grad():
                outputs = model(**batch)
            predictions = outputs.logits.argmax(dim=-1)
            predictions, references = accelerator.gather_for_metrics((predictions, batch["labels"]))
            metric.add_batch(
                predictions=predictions,
                references=references,
            )

        eval_metric = metric.compute()
        # Use accelerator.print to print only on the main process.
        accelerator.print(f"epoch {epoch}:", eval_metric)
def main():
    parser = argparse.ArgumentParser(description="Simple example of training script.")
    parser.add_argument(
        "--mixed_precision",
        type=str,
        default=None,
        choices=["no", "fp16", "bf16", "fp8"],
        help="Whether to use mixed precision. Choose"
        "between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."
        "and an Nvidia Ampere GPU.",
    )
    # New Code #
    parser.add_argument(
        "--gradient_accumulation_steps",
        type=int,
        default=1,
        help="The number of minibatches to be run before gradients are accumulated.",
    )
    parser.add_argument(
        "--local_sgd_steps", type=int, default=8, help="Number of local SGD steps or None to disable local SGD"
    )
    parser.add_argument("--cpu", action="store_true", help="If passed, will train on the CPU.")
    args = parser.parse_args()
    config = {"lr": 2e-5, "num_epochs": 3, "seed": 42, "batch_size": 16}
    training_function(config, args)


if __name__ == "__main__":
    main()
| 3
|
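The sample above layers two `accelerate` features: gradient accumulation via the `accumulate` context manager, and LocalSGD, which synchronizes model parameters across workers only every `local_sgd_steps` optimizer steps. A minimal self-contained sketch of the same inner-loop pattern, with a toy model and dataset (it assumes the `accelerate.local_sgd.LocalSGD` import path of recent accelerate releases):

import torch
from torch.utils.data import DataLoader, TensorDataset
from accelerate import Accelerator
from accelerate.local_sgd import LocalSGD

accelerator = Accelerator(gradient_accumulation_steps=2)
model = torch.nn.Linear(4, 2)
optimizer = torch.optim.SGD(model.parameters(), lr=0.1)
dataset = TensorDataset(torch.randn(64, 4), torch.randint(0, 2, (64,)))
dataloader = DataLoader(dataset, batch_size=8)
model, optimizer, dataloader = accelerator.prepare(model, optimizer, dataloader)

model.train()
with LocalSGD(accelerator=accelerator, model=model, local_sgd_steps=8, enabled=True) as local_sgd:
    for x, y in dataloader:
        with accelerator.accumulate(model):
            loss = torch.nn.functional.cross_entropy(model(x), y)
            accelerator.backward(loss)
            optimizer.step()
            optimizer.zero_grad()
            # parameters are averaged across workers only every `local_sgd_steps` optimizer steps
            local_sgd.step()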
cache: dict[tuple[int, int, int], int] = {}


def _calculate(days: int, absent: int, late: int) -> int:
    # if we are absent twice, or late 3 consecutive days,
    # no further prize strings are possible
    if late == 3 or absent == 2:
        return 0

    # if we have no days left, and have not failed any other rules,
    # we have a prize string
    if days == 0:
        return 1

    # No easy solution, so now we need to do the recursive calculation

    # First, check if the combination is already in the cache, and
    # if yes, return the stored value from there since we already
    # know the number of possible prize strings from this point on
    key = (days, absent, late)
    if key in cache:
        return cache[key]

    # now we calculate the three possible ways that can unfold from
    # this point on, depending on our attendance today

    # 1) if we are late (but not absent), the "absent" counter stays as
    # it is, but the "late" counter increases by one
    state_late = _calculate(days - 1, absent, late + 1)

    # 2) if we are absent, the "absent" counter increases by 1, and the
    # "late" counter resets to 0
    state_absent = _calculate(days - 1, absent + 1, 0)

    # 3) if we are on time, this resets the "late" counter and keeps the
    # absent counter
    state_ontime = _calculate(days - 1, absent, 0)

    prizestrings = state_late + state_absent + state_ontime
    cache[key] = prizestrings

    return prizestrings


def solution(days: int = 30) -> int:
    return _calculate(days, absent=0, late=0)


if __name__ == "__main__":
    print(solution())
| 128
| 0
|
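The recursion above memoizes on the `(days, absent, late)` state with a hand-rolled dict. The same top-down count can be expressed with `functools.lru_cache`, which keys the cache on the argument tuple automatically; a sketch:

from functools import lru_cache


@lru_cache(maxsize=None)
def count_prize_strings(days: int, absent: int, late: int) -> int:
    """Equivalent top-down count using functools-based memoization."""
    if late == 3 or absent == 2:
        return 0
    if days == 0:
        return 1
    return (
        count_prize_strings(days - 1, absent, late + 1)  # late today
        + count_prize_strings(days - 1, absent + 1, 0)   # absent today
        + count_prize_strings(days - 1, absent, 0)       # on time today
    )


assert count_prize_strings(4, 0, 0) == 43  # known value for 4-day strings (Project Euler 191)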
import tempfile
import torch
from diffusers import PNDMScheduler
from .test_schedulers import SchedulerCommonTest
class PNDMSchedulerTest(SchedulerCommonTest):
    scheduler_classes = (PNDMScheduler,)
    forward_default_kwargs = (("num_inference_steps", 50),)

    def get_scheduler_config(self, **kwargs):
        config = {
            "num_train_timesteps": 1000,
            "beta_start": 0.0001,
            "beta_end": 0.02,
            "beta_schedule": "linear",
        }

        config.update(**kwargs)
        return config

    def check_over_configs(self, time_step=0, **config):
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop("num_inference_steps", None)
        sample = self.dummy_sample
        residual = 0.1 * sample
        dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]

        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config(**config)
            scheduler = scheduler_class(**scheduler_config)
            scheduler.set_timesteps(num_inference_steps)
            # copy over dummy past residuals
            scheduler.ets = dummy_past_residuals[:]

            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(tmpdirname)
                new_scheduler = scheduler_class.from_pretrained(tmpdirname)
                new_scheduler.set_timesteps(num_inference_steps)
                # copy over dummy past residuals
                new_scheduler.ets = dummy_past_residuals[:]

            output = scheduler.step_prk(residual, time_step, sample, **kwargs).prev_sample
            new_output = new_scheduler.step_prk(residual, time_step, sample, **kwargs).prev_sample

            assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"

            output = scheduler.step_plms(residual, time_step, sample, **kwargs).prev_sample
            new_output = new_scheduler.step_plms(residual, time_step, sample, **kwargs).prev_sample

            assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"

    def test_from_save_pretrained(self):
        pass

    def check_over_forward(self, time_step=0, **forward_kwargs):
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop("num_inference_steps", None)
        sample = self.dummy_sample
        residual = 0.1 * sample
        dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]

        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config)
            scheduler.set_timesteps(num_inference_steps)

            # copy over dummy past residuals (must be after setting timesteps)
            scheduler.ets = dummy_past_residuals[:]

            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(tmpdirname)
                new_scheduler = scheduler_class.from_pretrained(tmpdirname)
                # copy over dummy past residuals
                new_scheduler.set_timesteps(num_inference_steps)

                # copy over dummy past residual (must be after setting timesteps)
                new_scheduler.ets = dummy_past_residuals[:]

            output = scheduler.step_prk(residual, time_step, sample, **kwargs).prev_sample
            new_output = new_scheduler.step_prk(residual, time_step, sample, **kwargs).prev_sample

            assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"

            output = scheduler.step_plms(residual, time_step, sample, **kwargs).prev_sample
            new_output = new_scheduler.step_plms(residual, time_step, sample, **kwargs).prev_sample

            assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"

    def full_loop(self, **config):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(**config)
        scheduler = scheduler_class(**scheduler_config)

        num_inference_steps = 10
        model = self.dummy_model()
        sample = self.dummy_sample_deter
        scheduler.set_timesteps(num_inference_steps)

        for i, t in enumerate(scheduler.prk_timesteps):
            residual = model(sample, t)
            sample = scheduler.step_prk(residual, t, sample).prev_sample

        for i, t in enumerate(scheduler.plms_timesteps):
            residual = model(sample, t)
            sample = scheduler.step_plms(residual, t, sample).prev_sample

        return sample

    def test_step_shape(self):
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop("num_inference_steps", None)

        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config)

            sample = self.dummy_sample
            residual = 0.1 * sample

            if num_inference_steps is not None and hasattr(scheduler, "set_timesteps"):
                scheduler.set_timesteps(num_inference_steps)
            elif num_inference_steps is not None and not hasattr(scheduler, "set_timesteps"):
                scheduler.num_inference_steps = num_inference_steps

            # copy over dummy past residuals (must be done after set_timesteps)
            dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
            scheduler.ets = dummy_past_residuals[:]

            output_0 = scheduler.step_prk(residual, 0, sample, **kwargs).prev_sample
            output_1 = scheduler.step_prk(residual, 1, sample, **kwargs).prev_sample

            self.assertEqual(output_0.shape, sample.shape)
            self.assertEqual(output_0.shape, output_1.shape)

            output_0 = scheduler.step_plms(residual, 0, sample, **kwargs).prev_sample
            output_1 = scheduler.step_plms(residual, 1, sample, **kwargs).prev_sample

            self.assertEqual(output_0.shape, sample.shape)
            self.assertEqual(output_0.shape, output_1.shape)

    def test_timesteps(self):
        for timesteps in [100, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps)

    def test_steps_offset(self):
        for steps_offset in [0, 1]:
            self.check_over_configs(steps_offset=steps_offset)

        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(steps_offset=1)
        scheduler = scheduler_class(**scheduler_config)
        scheduler.set_timesteps(10)
        assert torch.equal(
            scheduler.timesteps,
            torch.LongTensor(
                [901, 851, 851, 801, 801, 751, 751, 701, 701, 651, 651, 601, 601, 501, 401, 301, 201, 101, 1]
            ),
        )

    def test_betas(self):
        for beta_start, beta_end in zip([0.0001, 0.001], [0.002, 0.02]):
            self.check_over_configs(beta_start=beta_start, beta_end=beta_end)

    def test_schedules(self):
        for schedule in ["linear", "squaredcos_cap_v2"]:
            self.check_over_configs(beta_schedule=schedule)

    def test_prediction_type(self):
        for prediction_type in ["epsilon", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type)

    def test_time_indices(self):
        for t in [1, 5, 10]:
            self.check_over_forward(time_step=t)

    def test_inference_steps(self):
        for t, num_inference_steps in zip([1, 5, 10], [10, 50, 100]):
            self.check_over_forward(num_inference_steps=num_inference_steps)

    def test_pow_of_3_inference_steps(self):
        num_inference_steps = 27

        for scheduler_class in self.scheduler_classes:
            sample = self.dummy_sample
            residual = 0.1 * sample

            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config)

            scheduler.set_timesteps(num_inference_steps)

            # before power of 3 fix, would error on first step, so we only need to do two
            for i, t in enumerate(scheduler.prk_timesteps[:2]):
                sample = scheduler.step_prk(residual, t, sample).prev_sample

    def test_inference_plms_no_past_residuals(self):
        with self.assertRaises(ValueError):
            scheduler_class = self.scheduler_classes[0]
            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config)

            scheduler.step_plms(self.dummy_sample, 1, self.dummy_sample).prev_sample

    def test_full_loop_no_noise(self):
        sample = self.full_loop()
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 198.1318) < 1e-2
        assert abs(result_mean.item() - 0.2580) < 1e-3

    def test_full_loop_with_v_prediction(self):
        sample = self.full_loop(prediction_type="v_prediction")
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 67.3986) < 1e-2
        assert abs(result_mean.item() - 0.0878) < 1e-3

    def test_full_loop_with_set_alpha_to_one(self):
        sample = self.full_loop(set_alpha_to_one=True, beta_start=0.01)
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 230.0399) < 1e-2
        assert abs(result_mean.item() - 0.2995) < 1e-3

    def test_full_loop_with_no_set_alpha_to_one(self):
        sample = self.full_loop(set_alpha_to_one=False, beta_start=0.01)
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 186.9482) < 1e-2
        assert abs(result_mean.item() - 0.2434) < 1e-3
| 84
|
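The tests above drive `step_prk` and `step_plms` directly. In normal use, `PNDMScheduler.step` dispatches on its own to the Runge-Kutta warmup steps and then the linear-multistep updates. A toy denoising loop with a stand-in for the trained UNet (the lambda is a placeholder, not a real model):

import torch
from diffusers import PNDMScheduler

scheduler = PNDMScheduler(num_train_timesteps=1000, beta_schedule="linear")
scheduler.set_timesteps(50)

sample = torch.randn(1, 3, 8, 8)      # stand-in for a noisy latent
fake_model = lambda x, t: 0.1 * x     # stand-in for a UNet's noise prediction

for t in scheduler.timesteps:
    residual = fake_model(sample, t)
    # `step` runs the PRK warmup steps first, then the PLMS updates
    sample = scheduler.step(residual, t, sample).prev_sample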
import pickle
import unittest
import torch
from accelerate import Accelerator
from accelerate.state import AcceleratorState
from accelerate.test_utils import require_cpu
@require_cpu
class OptimizerTester(unittest.TestCase):
    def test_accelerated_optimizer_pickling(self):
        model = torch.nn.Linear(10, 10)
        optimizer = torch.optim.SGD(model.parameters(), 0.1)
        accelerator = Accelerator()
        optimizer = accelerator.prepare(optimizer)
        try:
            pickle.loads(pickle.dumps(optimizer))
        except Exception as e:
            self.fail(f"Accelerated optimizer pickling failed with {e}")
        AcceleratorState._reset_state()
| 84
| 1
|
import os
import tempfile
import unittest
import numpy as np
from diffusers.utils import is_flax_available
from diffusers.utils.testing_utils import require_flax, slow
if is_flax_available():
import jax
import jax.numpy as jnp
from flax.jax_utils import replicate
from flax.training.common_utils import shard
from diffusers import FlaxDDIMScheduler, FlaxDiffusionPipeline, FlaxStableDiffusionPipeline
@require_flax
class _SCREAMING_SNAKE_CASE ( unittest.TestCase ):
def SCREAMING_SNAKE_CASE_( self ) -> Union[str, Any]:
with tempfile.TemporaryDirectory() as tmpdirname:
# pipeline has Flax weights
lowerCamelCase_ = FlaxDiffusionPipeline.from_pretrained(
"hf-internal-testing/tiny-stable-diffusion-pipe" , safety_checker=lowercase , cache_dir=lowercase )
lowerCamelCase_ = [t[-1] for t in os.walk(os.path.join(lowercase , os.listdir(lowercase )[0] , "snapshots" ) )]
lowerCamelCase_ = [item for sublist in all_root_files for item in sublist]
# None of the downloaded files should be a PyTorch file even if we have some here:
# https://huggingface.co/hf-internal-testing/tiny-stable-diffusion-pipe/blob/main/unet/diffusion_pytorch_model.bin
assert not any(f.endswith(".bin" ) for f in files )
@slow
@require_flax
class FlaxStableDiffusionPipelineIntegrationTests(unittest.TestCase):
    def test_stable_diffusion_flax_tiny(self):
        pipeline, params = FlaxStableDiffusionPipeline.from_pretrained(
            "hf-internal-testing/tiny-stable-diffusion-pipe", safety_checker=None
        )

        prompt = (
            "A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of"
            " field, close up, split lighting, cinematic"
        )

        prng_seed = jax.random.PRNGKey(0)
        num_inference_steps = 4

        num_samples = jax.device_count()
        prompt = num_samples * [prompt]

        prompt_ids = pipeline.prepare_inputs(prompt)

        # shard inputs and rng
        params = replicate(params)
        prng_seed = jax.random.split(prng_seed, num_samples)
        prompt_ids = shard(prompt_ids)

        images = pipeline(prompt_ids, params, prng_seed, num_inference_steps, jit=True).images
        assert images.shape == (num_samples, 1, 64, 64, 3)
        if jax.device_count() == 8:
            assert np.abs(np.abs(images[0, 0, :2, :2, -2:], dtype=np.float32).sum() - 4.1514745) < 1e-3
            assert np.abs(np.abs(images, dtype=np.float32).sum() - 49947.875) < 5e-1

        images_pil = pipeline.numpy_to_pil(np.asarray(images.reshape((num_samples,) + images.shape[-3:])))

        assert len(images_pil) == num_samples

    def test_stable_diffusion_v1_4(self):
        pipeline, params = FlaxStableDiffusionPipeline.from_pretrained(
            "CompVis/stable-diffusion-v1-4", revision="flax", safety_checker=None
        )

        prompt = (
            "A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of"
            " field, close up, split lighting, cinematic"
        )

        prng_seed = jax.random.PRNGKey(0)
        num_inference_steps = 50

        num_samples = jax.device_count()
        prompt = num_samples * [prompt]

        prompt_ids = pipeline.prepare_inputs(prompt)

        # shard inputs and rng
        params = replicate(params)
        prng_seed = jax.random.split(prng_seed, num_samples)
        prompt_ids = shard(prompt_ids)

        images = pipeline(prompt_ids, params, prng_seed, num_inference_steps, jit=True).images
        assert images.shape == (num_samples, 1, 512, 512, 3)
        if jax.device_count() == 8:
            assert np.abs((np.abs(images[0, 0, :2, :2, -2:], dtype=np.float32).sum() - 0.05652401)) < 1e-3
            assert np.abs((np.abs(images, dtype=np.float32).sum() - 2383808.2)) < 5e-1

    def test_stable_diffusion_v1_4_bfloat_16(self):
        pipeline, params = FlaxStableDiffusionPipeline.from_pretrained(
            "CompVis/stable-diffusion-v1-4", revision="bf16", dtype=jnp.bfloat16, safety_checker=None
        )

        prompt = (
            "A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of"
            " field, close up, split lighting, cinematic"
        )

        prng_seed = jax.random.PRNGKey(0)
        num_inference_steps = 50

        num_samples = jax.device_count()
        prompt = num_samples * [prompt]

        prompt_ids = pipeline.prepare_inputs(prompt)

        # shard inputs and rng
        params = replicate(params)
        prng_seed = jax.random.split(prng_seed, num_samples)
        prompt_ids = shard(prompt_ids)

        images = pipeline(prompt_ids, params, prng_seed, num_inference_steps, jit=True).images
        assert images.shape == (num_samples, 1, 512, 512, 3)
        if jax.device_count() == 8:
            assert np.abs((np.abs(images[0, 0, :2, :2, -2:], dtype=np.float32).sum() - 0.04003906)) < 1e-3
            assert np.abs((np.abs(images, dtype=np.float32).sum() - 2373516.75)) < 5e-1

    def test_stable_diffusion_v1_4_bfloat_16_with_safety(self):
        pipeline, params = FlaxStableDiffusionPipeline.from_pretrained(
            "CompVis/stable-diffusion-v1-4", revision="bf16", dtype=jnp.bfloat16
        )

        prompt = (
            "A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of"
            " field, close up, split lighting, cinematic"
        )

        prng_seed = jax.random.PRNGKey(0)
        num_inference_steps = 50

        num_samples = jax.device_count()
        prompt = num_samples * [prompt]

        prompt_ids = pipeline.prepare_inputs(prompt)

        # shard inputs and rng
        params = replicate(params)
        prng_seed = jax.random.split(prng_seed, num_samples)
        prompt_ids = shard(prompt_ids)

        images = pipeline(prompt_ids, params, prng_seed, num_inference_steps, jit=True).images
        assert images.shape == (num_samples, 1, 512, 512, 3)
        if jax.device_count() == 8:
            assert np.abs((np.abs(images[0, 0, :2, :2, -2:], dtype=np.float32).sum() - 0.04003906)) < 1e-3
            assert np.abs((np.abs(images, dtype=np.float32).sum() - 2373516.75)) < 5e-1

    def test_stable_diffusion_v1_4_bfloat_16_ddim(self):
        scheduler = FlaxDDIMScheduler(
            beta_start=0.00085,
            beta_end=0.012,
            beta_schedule="scaled_linear",
            set_alpha_to_one=False,
            steps_offset=1,
        )

        pipeline, params = FlaxStableDiffusionPipeline.from_pretrained(
            "CompVis/stable-diffusion-v1-4",
            revision="bf16",
            dtype=jnp.bfloat16,
            scheduler=scheduler,
            safety_checker=None,
        )
        scheduler_state = scheduler.create_state()

        params["scheduler"] = scheduler_state

        prompt = (
            "A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of"
            " field, close up, split lighting, cinematic"
        )

        prng_seed = jax.random.PRNGKey(0)
        num_inference_steps = 50

        num_samples = jax.device_count()
        prompt = num_samples * [prompt]

        prompt_ids = pipeline.prepare_inputs(prompt)

        # shard inputs and rng
        params = replicate(params)
        prng_seed = jax.random.split(prng_seed, num_samples)
        prompt_ids = shard(prompt_ids)

        images = pipeline(prompt_ids, params, prng_seed, num_inference_steps, jit=True).images
        assert images.shape == (num_samples, 1, 512, 512, 3)
        if jax.device_count() == 8:
            assert np.abs((np.abs(images[0, 0, :2, :2, -2:], dtype=np.float32).sum() - 0.045043945)) < 1e-3
            assert np.abs((np.abs(images, dtype=np.float32).sum() - 2347693.5)) < 5e-1

    def test_jax_memory_efficient_attention(self):
        prompt = (
            "A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of"
            " field, close up, split lighting, cinematic"
        )

        num_samples = jax.device_count()
        prompt = num_samples * [prompt]
        prng_seed = jax.random.split(jax.random.PRNGKey(0), num_samples)

        pipeline, params = FlaxStableDiffusionPipeline.from_pretrained(
            "CompVis/stable-diffusion-v1-4",
            revision="bf16",
            dtype=jnp.bfloat16,
            safety_checker=None,
        )

        params = replicate(params)
        prompt_ids = pipeline.prepare_inputs(prompt)
        prompt_ids = shard(prompt_ids)
        images = pipeline(prompt_ids, params, prng_seed, jit=True).images
        assert images.shape == (num_samples, 1, 512, 512, 3)
        slice = images[2, 0, 256, 10:17, 1]

        # With memory efficient attention
        pipeline, params = FlaxStableDiffusionPipeline.from_pretrained(
            "CompVis/stable-diffusion-v1-4",
            revision="bf16",
            dtype=jnp.bfloat16,
            safety_checker=None,
            use_memory_efficient_attention=True,
        )

        params = replicate(params)
        prompt_ids = pipeline.prepare_inputs(prompt)
        prompt_ids = shard(prompt_ids)
        images_eff = pipeline(prompt_ids, params, prng_seed, jit=True).images
        assert images_eff.shape == (num_samples, 1, 512, 512, 3)
        slice_eff = images_eff[2, 0, 256, 10:17, 1]

        # I checked the results visually and they are very similar. However, I saw that the max diff is `1` and the `sum`
        # over the 8 images is exactly `256`, which is very suspicious. Testing a random slice for now.
        assert abs(slice_eff - slice).max() < 1e-2
| 19
|
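Every integration test above repeats the same data-parallel setup: `replicate` copies the params to each device, `jax.random.split` gives each device its own RNG, and `shard` reshapes the batch so its leading axis matches the device count. The pattern in isolation, with a toy `pmap`-ed function standing in for the pipeline:

import jax
import jax.numpy as jnp
import numpy as np
from flax.jax_utils import replicate
from flax.training.common_utils import shard

num_devices = jax.device_count()

params = {"w": jnp.ones((4,))}           # stand-in for pipeline params
inputs = np.ones((num_devices * 2, 4))   # batch must be divisible by device count

params = replicate(params)                                    # copy params to every device
rngs = jax.random.split(jax.random.PRNGKey(0), num_devices)   # one RNG key per device
inputs = shard(inputs)                                        # leading dim -> (devices, per-device batch)


@jax.pmap
def apply(params, rng, x):
    noise = jax.random.normal(rng, x.shape) * 0.01
    return x * params["w"] + noise


out = apply(params, rngs, inputs)
print(out.shape)  # (num_devices, 2, 4)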
INSTALL_CONTENT = """
# Transformers installation
! pip install transformers datasets
# To install from source instead of the last release, comment the command above and uncomment the following one.
# ! pip install git+https://github.com/huggingface/transformers.git
"""

notebook_first_cells = [{"type": "code", "content": INSTALL_CONTENT}]
black_avoid_patterns = {
    "{processor_class}": "FakeProcessorClass",
    "{model_class}": "FakeModelClass",
    "{object_class}": "FakeObjectClass",
}
| 47
| 0
|
"""simple docstring"""
import re
import jax.numpy as jnp
from flax.traverse_util import flatten_dict, unflatten_dict
from jax.random import PRNGKey
from ..utils import logging
logger = logging.get_logger(__name__)
def rename_key(key):
    regex = r"\w+[.]\d+"
    pats = re.findall(regex, key)
    for pat in pats:
        key = key.replace(pat, "_".join(pat.split(".")))
    return key


def rename_key_and_reshape_tensor(pt_tuple_key, pt_tensor, random_flax_state_dict):
    # conv norm or layer norm
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("scale",)
    if (
        any("norm" in str_ for str_ in pt_tuple_key)
        and (pt_tuple_key[-1] == "bias")
        and (pt_tuple_key[:-1] + ("bias",) not in random_flax_state_dict)
        and (pt_tuple_key[:-1] + ("scale",) in random_flax_state_dict)
    ):
        renamed_pt_tuple_key = pt_tuple_key[:-1] + ("scale",)
        return renamed_pt_tuple_key, pt_tensor
    elif pt_tuple_key[-1] in ["weight", "gamma"] and pt_tuple_key[:-1] + ("scale",) in random_flax_state_dict:
        renamed_pt_tuple_key = pt_tuple_key[:-1] + ("scale",)
        return renamed_pt_tuple_key, pt_tensor

    # embedding
    if pt_tuple_key[-1] == "weight" and pt_tuple_key[:-1] + ("embedding",) in random_flax_state_dict:
        renamed_pt_tuple_key = pt_tuple_key[:-1] + ("embedding",)
        return renamed_pt_tuple_key, pt_tensor

    # conv layer
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("kernel",)
    if pt_tuple_key[-1] == "weight" and pt_tensor.ndim == 4:
        pt_tensor = pt_tensor.transpose(2, 3, 1, 0)
        return renamed_pt_tuple_key, pt_tensor

    # linear layer
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("kernel",)
    if pt_tuple_key[-1] == "weight":
        pt_tensor = pt_tensor.T
        return renamed_pt_tuple_key, pt_tensor

    # old PyTorch layer norm weight
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("weight",)
    if pt_tuple_key[-1] == "gamma":
        return renamed_pt_tuple_key, pt_tensor

    # old PyTorch layer norm bias
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("bias",)
    if pt_tuple_key[-1] == "beta":
        return renamed_pt_tuple_key, pt_tensor

    return pt_tuple_key, pt_tensor


def convert_pytorch_state_dict_to_flax(pt_state_dict, flax_model, init_key=42):
    # Step 1: Convert pytorch tensor to numpy
    pt_state_dict = {k: v.numpy() for k, v in pt_state_dict.items()}

    # Step 2: Since the model is stateless, get random Flax params
    random_flax_params = flax_model.init_weights(PRNGKey(init_key))

    random_flax_state_dict = flatten_dict(random_flax_params)
    flax_state_dict = {}

    # Need to change some parameters name to match Flax names
    for pt_key, pt_tensor in pt_state_dict.items():
        renamed_pt_key = rename_key(pt_key)
        pt_tuple_key = tuple(renamed_pt_key.split("."))

        # Correctly rename weight parameters
        flax_key, flax_tensor = rename_key_and_reshape_tensor(pt_tuple_key, pt_tensor, random_flax_state_dict)

        if flax_key in random_flax_state_dict:
            if flax_tensor.shape != random_flax_state_dict[flax_key].shape:
                raise ValueError(
                    f"PyTorch checkpoint seems to be incorrect. Weight {pt_key} was expected to be of shape "
                    f"{random_flax_state_dict[flax_key].shape}, but is {flax_tensor.shape}."
                )

        # also add unexpected weight so that warning is thrown
        flax_state_dict[flax_key] = jnp.asarray(flax_tensor)

    return unflatten_dict(flax_state_dict)
| 181
|
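The two reshape rules doing the real work above are the linear and conv cases: PyTorch stores linear weights as (out_features, in_features) while Flax Dense kernels are (in_features, out_features), and PyTorch conv weights (out_ch, in_ch, kh, kw) become Flax (kh, kw, in_ch, out_ch). A tiny numpy illustration:

import numpy as np

# PyTorch linear weight (out_features, in_features) -> Flax Dense kernel (in_features, out_features)
pt_linear = np.zeros((2, 3))
flax_kernel = pt_linear.T
print(flax_kernel.shape)  # (3, 2)

# PyTorch conv weight (out_ch, in_ch, kh, kw) -> Flax Conv kernel (kh, kw, in_ch, out_ch)
pt_conv = np.zeros((8, 4, 3, 3))
flax_conv_kernel = pt_conv.transpose(2, 3, 1, 0)
print(flax_conv_kernel.shape)  # (3, 3, 4, 8)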
"""simple docstring"""
import contextlib
import copy
import random
from typing import Any, Dict, Iterable, Optional, Union
import numpy as np
import torch
from .utils import deprecate, is_transformers_available
if is_transformers_available():
import transformers
def set_seed(seed: int):
    random.seed(seed)
    np.random.seed(seed)
    torch.manual_seed(seed)
    torch.cuda.manual_seed_all(seed)
    # ^^ safe to call this function even if cuda is not available
class EMAModel:
    """
    Exponential Moving Average of models weights
    """

    def __init__(
        self,
        parameters: Iterable[torch.nn.Parameter],
        decay: float = 0.9999,
        min_decay: float = 0.0,
        update_after_step: int = 0,
        use_ema_warmup: bool = False,
        inv_gamma: Union[float, int] = 1.0,
        power: Union[float, int] = 2 / 3,
        model_cls: Optional[Any] = None,
        model_config: Dict[str, Any] = None,
        **kwargs,
    ):
        if isinstance(parameters, torch.nn.Module):
            deprecation_message = (
                "Passing a `torch.nn.Module` to `ExponentialMovingAverage` is deprecated. "
                "Please pass the parameters of the module instead."
            )
            deprecate(
                "passing a `torch.nn.Module` to `ExponentialMovingAverage`",
                "1.0.0",
                deprecation_message,
                standard_warn=False,
            )
            parameters = parameters.parameters()

            # set use_ema_warmup to True if a torch.nn.Module is passed for backwards compatibility
            use_ema_warmup = True

        if kwargs.get("max_value", None) is not None:
            deprecation_message = "The `max_value` argument is deprecated. Please use `decay` instead."
            deprecate("max_value", "1.0.0", deprecation_message, standard_warn=False)
            decay = kwargs["max_value"]

        if kwargs.get("min_value", None) is not None:
            deprecation_message = "The `min_value` argument is deprecated. Please use `min_decay` instead."
            deprecate("min_value", "1.0.0", deprecation_message, standard_warn=False)
            min_decay = kwargs["min_value"]

        parameters = list(parameters)
        self.shadow_params = [p.clone().detach() for p in parameters]

        if kwargs.get("device", None) is not None:
            deprecation_message = "The `device` argument is deprecated. Please use `to` instead."
            deprecate("device", "1.0.0", deprecation_message, standard_warn=False)
            self.to(device=kwargs["device"])

        self.temp_stored_params = None

        self.decay = decay
        self.min_decay = min_decay
        self.update_after_step = update_after_step
        self.use_ema_warmup = use_ema_warmup
        self.inv_gamma = inv_gamma
        self.power = power
        self.optimization_step = 0
        self.cur_decay_value = None  # set in `step()`

        self.model_cls = model_cls
        self.model_config = model_config

    @classmethod
    def from_pretrained(cls, path, model_cls) -> "EMAModel":
        _, ema_kwargs = model_cls.load_config(path, return_unused_kwargs=True)
        model = model_cls.from_pretrained(path)

        ema_model = cls(model.parameters(), model_cls=model_cls, model_config=model.config)

        ema_model.load_state_dict(ema_kwargs)
        return ema_model

    def save_pretrained(self, path):
        if self.model_cls is None:
            raise ValueError("`save_pretrained` can only be used if `model_cls` was defined at __init__.")

        if self.model_config is None:
            raise ValueError("`save_pretrained` can only be used if `model_config` was defined at __init__.")

        model = self.model_cls.from_config(self.model_config)
        state_dict = self.state_dict()
        state_dict.pop("shadow_params", None)

        model.register_to_config(**state_dict)
        self.copy_to(model.parameters())
        model.save_pretrained(path)

    def get_decay(self, optimization_step: int) -> float:
        step = max(0, optimization_step - self.update_after_step - 1)

        if step <= 0:
            return 0.0

        if self.use_ema_warmup:
            cur_decay_value = 1 - (1 + step / self.inv_gamma) ** -self.power
        else:
            cur_decay_value = (1 + step) / (10 + step)

        cur_decay_value = min(cur_decay_value, self.decay)
        # make sure decay is not smaller than min_decay
        cur_decay_value = max(cur_decay_value, self.min_decay)
        return cur_decay_value

    @torch.no_grad()
    def step(self, parameters: Iterable[torch.nn.Parameter]):
        if isinstance(parameters, torch.nn.Module):
            deprecation_message = (
                "Passing a `torch.nn.Module` to `ExponentialMovingAverage.step` is deprecated. "
                "Please pass the parameters of the module instead."
            )
            deprecate(
                "passing a `torch.nn.Module` to `ExponentialMovingAverage.step`",
                "1.0.0",
                deprecation_message,
                standard_warn=False,
            )
            parameters = parameters.parameters()

        parameters = list(parameters)

        self.optimization_step += 1

        # Compute the decay factor for the exponential moving average.
        decay = self.get_decay(self.optimization_step)
        self.cur_decay_value = decay
        one_minus_decay = 1 - decay

        context_manager = contextlib.nullcontext
        if is_transformers_available() and transformers.deepspeed.is_deepspeed_zero3_enabled():
            import deepspeed

        for s_param, param in zip(self.shadow_params, parameters):
            if is_transformers_available() and transformers.deepspeed.is_deepspeed_zero3_enabled():
                context_manager = deepspeed.zero.GatheredParameters(param, modifier_rank=None)

            with context_manager():
                if param.requires_grad:
                    s_param.sub_(one_minus_decay * (s_param - param))
                else:
                    s_param.copy_(param)

    def copy_to(self, parameters: Iterable[torch.nn.Parameter]) -> None:
        parameters = list(parameters)
        for s_param, param in zip(self.shadow_params, parameters):
            param.data.copy_(s_param.to(param.device).data)

    def to(self, device=None, dtype=None) -> None:
        self.shadow_params = [
            p.to(device=device, dtype=dtype) if p.is_floating_point() else p.to(device=device)
            for p in self.shadow_params
        ]

    def state_dict(self) -> dict:
        return {
            "decay": self.decay,
            "min_decay": self.min_decay,
            "optimization_step": self.optimization_step,
            "update_after_step": self.update_after_step,
            "use_ema_warmup": self.use_ema_warmup,
            "inv_gamma": self.inv_gamma,
            "power": self.power,
            "shadow_params": self.shadow_params,
        }

    def store(self, parameters: Iterable[torch.nn.Parameter]) -> None:
        self.temp_stored_params = [param.detach().cpu().clone() for param in parameters]

    def restore(self, parameters: Iterable[torch.nn.Parameter]) -> None:
        if self.temp_stored_params is None:
            raise RuntimeError("This ExponentialMovingAverage has no `store()`ed weights to `restore()`")
        for c_param, param in zip(self.temp_stored_params, parameters):
            param.data.copy_(c_param.data)

        # Better memory-wise.
        self.temp_stored_params = None

    def load_state_dict(self, state_dict: dict) -> None:
        state_dict = copy.deepcopy(state_dict)

        self.decay = state_dict.get("decay", self.decay)
        if self.decay < 0.0 or self.decay > 1.0:
            raise ValueError("Decay must be between 0 and 1")

        self.min_decay = state_dict.get("min_decay", self.min_decay)
        if not isinstance(self.min_decay, float):
            raise ValueError("Invalid min_decay")

        self.optimization_step = state_dict.get("optimization_step", self.optimization_step)
        if not isinstance(self.optimization_step, int):
            raise ValueError("Invalid optimization_step")

        self.update_after_step = state_dict.get("update_after_step", self.update_after_step)
        if not isinstance(self.update_after_step, int):
            raise ValueError("Invalid update_after_step")

        self.use_ema_warmup = state_dict.get("use_ema_warmup", self.use_ema_warmup)
        if not isinstance(self.use_ema_warmup, bool):
            raise ValueError("Invalid use_ema_warmup")

        self.inv_gamma = state_dict.get("inv_gamma", self.inv_gamma)
        if not isinstance(self.inv_gamma, (float, int)):
            raise ValueError("Invalid inv_gamma")

        self.power = state_dict.get("power", self.power)
        if not isinstance(self.power, (float, int)):
            raise ValueError("Invalid power")

        shadow_params = state_dict.get("shadow_params", None)
        if shadow_params is not None:
            self.shadow_params = shadow_params
            if not isinstance(self.shadow_params, list):
                raise ValueError("shadow_params must be a list")
            if not all(isinstance(p, torch.Tensor) for p in self.shadow_params):
                raise ValueError("shadow_params must all be Tensors")
| 181
| 1
|
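The decay schedule in `get_decay` ramps from 0 toward `decay` as training progresses, using either the warmup curve `1 - (1 + step/inv_gamma)^-power` or the default `(1 + step)/(10 + step)`. A standalone re-implementation for inspecting the curve, using the same defaults as the class above:

def ema_decay(step: int, update_after_step: int = 0, use_ema_warmup: bool = True,
              inv_gamma: float = 1.0, power: float = 2 / 3,
              max_decay: float = 0.9999, min_decay: float = 0.0) -> float:
    """Mirror of EMAModel.get_decay for a single step value."""
    step = max(0, step - update_after_step - 1)
    if step <= 0:
        return 0.0
    if use_ema_warmup:
        value = 1 - (1 + step / inv_gamma) ** -power
    else:
        value = (1 + step) / (10 + step)
    return max(min(value, max_decay), min_decay)


for s in (1, 10, 100, 1000, 10000):
    print(s, round(ema_decay(s), 5))  # decay ramps toward max_decay as training progresses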
import numpy as np
from numpy import ndarray
from scipy.optimize import Bounds, LinearConstraint, minimize
def norm_squared(vector: ndarray) -> float:
    return np.dot(vector, vector)


class SVC:
    def __init__(self, *, regularization: float = np.inf, kernel: str = "linear", gamma: float = 0.0) -> None:
        self.regularization = regularization
        self.gamma = gamma
        if kernel == "linear":
            self.kernel = self.__linear
        elif kernel == "rbf":
            if self.gamma == 0:
                raise ValueError("rbf kernel requires gamma")
            if not isinstance(self.gamma, (float, int)):
                raise ValueError("gamma must be float or int")
            if not self.gamma > 0:
                raise ValueError("gamma must be > 0")
            self.kernel = self.__rbf
            # in the future, there could be a default value like in sklearn
            # sklear: def_gamma = 1/(n_features * X.var()) (wiki)
            # previously it was 1/(n_features)
        else:
            msg = f"Unknown kernel: {kernel}"
            raise ValueError(msg)

    def __linear(self, vector1: ndarray, vector2: ndarray) -> float:
        return np.dot(vector1, vector2)

    def __rbf(self, vector1: ndarray, vector2: ndarray) -> float:
        return np.exp(-(self.gamma * norm_squared(vector1 - vector2)))

    def fit(self, observations, classes: ndarray) -> None:
        self.observations = observations
        self.classes = classes

        # using Wolfe's Dual to calculate w.
        # Primal problem: minimize 1/2*norm_squared(w)
        # constraint: yn(w . xn + b) >= 1
        #
        # With l a vector
        # Dual problem: maximize sum_n(ln) -
        #       1/2 * sum_n(sum_m(ln*lm*yn*ym*xn . xm))
        # constraint: self.C >= ln >= 0
        # and sum_n(ln*yn) = 0
        # Then we get w using w = sum_n(ln*yn*xn)
        # At the end we can get b ~= mean(yn - w . xn)
        #
        # Since we use kernels, we only need l_star to calculate b
        # and to classify observations
        (n,) = np.shape(classes)

        def to_minimize(candidate: ndarray) -> float:
            s = 0
            (n,) = np.shape(candidate)
            for i in range(n):
                for j in range(n):
                    s += (
                        candidate[i]
                        * candidate[j]
                        * classes[i]
                        * classes[j]
                        * self.kernel(observations[i], observations[j])
                    )
            return 1 / 2 * s - sum(candidate)

        ly_constraint = LinearConstraint(classes, 0, 0)
        l_bounds = Bounds(0, self.regularization)

        l_star = minimize(to_minimize, np.ones(n), bounds=l_bounds, constraints=[ly_constraint]).x
        self.optimum = l_star

        # calculating mean offset of separation plane to points
        s = 0
        for i in range(n):
            for j in range(n):
                s += classes[i] - classes[i] * self.optimum[i] * self.kernel(observations[i], observations[j])
        self.offset = s / n

    def predict(self, observation: ndarray) -> int:
        s = sum(
            self.optimum[n] * self.classes[n] * self.kernel(self.observations[n], observation)
            for n in range(len(self.classes))
        )
        return 1 if s + self.offset >= 0 else -1


if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 99
|
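The RBF kernel used above, exp(-gamma * ||v1 - v2||^2), maps squared Euclidean distance to a similarity in (0, 1]. A quick numeric check:

import numpy as np

def rbf(v1, v2, gamma=0.5):
    diff = v1 - v2
    return np.exp(-gamma * np.dot(diff, diff))

a, b = np.array([0.0, 0.0]), np.array([1.0, 1.0])
print(rbf(a, a))  # 1.0: identical points have maximal similarity
print(rbf(a, b))  # exp(-0.5 * 2) ~= 0.3679, decaying with squared distance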
"""simple docstring"""
from transformers import BertTokenizer, EncoderDecoderModel, Seq2SeqTrainer, Seq2SeqTrainingArguments
from transformers.testing_utils import TestCasePlus, require_torch, slow
from transformers.utils import is_datasets_available
if is_datasets_available():
import datasets
class Seq2seqTrainerTester(TestCasePlus):
    @slow
    @require_torch
    def test_finetune_bert2bert(self):
        bert2bert = EncoderDecoderModel.from_encoder_decoder_pretrained("prajjwal1/bert-tiny", "prajjwal1/bert-tiny")
        tokenizer = BertTokenizer.from_pretrained("bert-base-uncased")

        bert2bert.config.vocab_size = bert2bert.config.encoder.vocab_size
        bert2bert.config.eos_token_id = tokenizer.sep_token_id
        bert2bert.config.decoder_start_token_id = tokenizer.cls_token_id
        bert2bert.config.max_length = 128

        train_dataset = datasets.load_dataset("cnn_dailymail", "3.0.0", split="train[:1%]")
        val_dataset = datasets.load_dataset("cnn_dailymail", "3.0.0", split="validation[:1%]")

        train_dataset = train_dataset.select(range(32))
        val_dataset = val_dataset.select(range(16))

        batch_size = 4

        def _map_to_encoder_decoder_inputs(batch):
            # Tokenizer will automatically set [BOS] <text> [EOS]
            inputs = tokenizer(batch["article"], padding="max_length", truncation=True, max_length=512)
            outputs = tokenizer(batch["highlights"], padding="max_length", truncation=True, max_length=128)
            batch["input_ids"] = inputs.input_ids
            batch["attention_mask"] = inputs.attention_mask

            batch["decoder_input_ids"] = outputs.input_ids
            batch["labels"] = outputs.input_ids.copy()
            batch["labels"] = [
                [-100 if token == tokenizer.pad_token_id else token for token in labels] for labels in batch["labels"]
            ]
            batch["decoder_attention_mask"] = outputs.attention_mask

            assert all(len(x) == 512 for x in inputs.input_ids)
            assert all(len(x) == 128 for x in outputs.input_ids)

            return batch

        def _compute_metrics(pred):
            labels_ids = pred.label_ids
            pred_ids = pred.predictions

            # all unnecessary tokens are removed
            pred_str = tokenizer.batch_decode(pred_ids, skip_special_tokens=True)
            label_str = tokenizer.batch_decode(labels_ids, skip_special_tokens=True)

            accuracy = sum([int(pred_str[i] == label_str[i]) for i in range(len(pred_str))]) / len(pred_str)

            return {"accuracy": accuracy}

        # map train dataset
        train_dataset = train_dataset.map(
            _map_to_encoder_decoder_inputs,
            batched=True,
            batch_size=batch_size,
            remove_columns=["article", "highlights"],
        )
        train_dataset.set_format(
            type="torch",
            columns=["input_ids", "attention_mask", "decoder_input_ids", "decoder_attention_mask", "labels"],
        )

        # same for validation dataset
        val_dataset = val_dataset.map(
            _map_to_encoder_decoder_inputs,
            batched=True,
            batch_size=batch_size,
            remove_columns=["article", "highlights"],
        )
        val_dataset.set_format(
            type="torch",
            columns=["input_ids", "attention_mask", "decoder_input_ids", "decoder_attention_mask", "labels"],
        )

        output_dir = self.get_auto_remove_tmp_dir()

        training_args = Seq2SeqTrainingArguments(
            output_dir=output_dir,
            per_device_train_batch_size=batch_size,
            per_device_eval_batch_size=batch_size,
            predict_with_generate=True,
            evaluation_strategy="steps",
            do_train=True,
            do_eval=True,
            warmup_steps=0,
            eval_steps=2,
            logging_steps=2,
        )

        # instantiate trainer
        trainer = Seq2SeqTrainer(
            model=bert2bert,
            args=training_args,
            compute_metrics=_compute_metrics,
            train_dataset=train_dataset,
            eval_dataset=val_dataset,
            tokenizer=tokenizer,
        )

        # start training
        trainer.train()
| 347
| 0
|
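The key preprocessing detail above is replacing pad tokens in the labels with -100, the index that `torch.nn.CrossEntropyLoss` ignores by default, so padding never contributes to the seq2seq loss. In isolation:

pad_token_id = 0
labels = [[5, 9, 2, 0, 0], [7, 3, 0, 0, 0]]
masked = [[-100 if tok == pad_token_id else tok for tok in seq] for seq in labels]
print(masked)  # [[5, 9, 2, -100, -100], [7, 3, -100, -100, -100]]
# CrossEntropyLoss(ignore_index=-100) skips the masked positions entirely.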
import inspect
import os
import unittest
import torch
import accelerate
from accelerate import Accelerator
from accelerate.test_utils import execute_subprocess_async, require_multi_gpu
from accelerate.utils import patch_environment
class MultiGPUTester(unittest.TestCase):
    def setUp(self):
        mod_file = inspect.getfile(accelerate.test_utils)
        self.test_file_path = os.path.sep.join(mod_file.split(os.path.sep)[:-1] + ["scripts", "test_script.py"])
        self.data_loop_file_path = os.path.sep.join(
            mod_file.split(os.path.sep)[:-1] + ["scripts", "test_distributed_data_loop.py"]
        )
        self.operation_file_path = os.path.sep.join(mod_file.split(os.path.sep)[:-1] + ["scripts", "test_ops.py"])

    @require_multi_gpu
    def test_multi_gpu(self):
        print(f"Found {torch.cuda.device_count()} devices.")
        cmd = ["torchrun", f"--nproc_per_node={torch.cuda.device_count()}", self.test_file_path]
        with patch_environment(omp_num_threads=1):
            execute_subprocess_async(cmd, env=os.environ.copy())

    @require_multi_gpu
    def test_multi_gpu_ops(self):
        print(f"Found {torch.cuda.device_count()} devices.")
        cmd = ["torchrun", f"--nproc_per_node={torch.cuda.device_count()}", self.operation_file_path]
        print(f"Command: {cmd}")
        with patch_environment(omp_num_threads=1):
            execute_subprocess_async(cmd, env=os.environ.copy())

    @require_multi_gpu
    def test_pad_across_processes(self):
        cmd = ["torchrun", f"--nproc_per_node={torch.cuda.device_count()}", inspect.getfile(self.__class__)]
        with patch_environment(omp_num_threads=1):
            execute_subprocess_async(cmd, env=os.environ.copy())

    @require_multi_gpu
    def test_distributed_data_loop(self):
        print(f"Found {torch.cuda.device_count()} devices, using 2 devices only")
        cmd = ["torchrun", f"--nproc_per_node={torch.cuda.device_count()}", self.data_loop_file_path]
        with patch_environment(omp_num_threads=1, cuda_visible_devices="0,1"):
            execute_subprocess_async(cmd, env=os.environ.copy())


if __name__ == "__main__":
    accelerator = Accelerator()
    shape = (accelerator.state.process_index + 2, 10)
    tensor = torch.randint(0, 10, shape).to(accelerator.device)

    error_msg = ""

    tensor1 = accelerator.pad_across_processes(tensor)
    if tensor1.shape[0] != accelerator.state.num_processes + 1:
        error_msg += f"Found shape {tensor1.shape} but should have {accelerator.state.num_processes + 1} at dim 0."
    if not torch.equal(tensor1[: accelerator.state.process_index + 2], tensor):
        error_msg += "Tensors have different values."
    if not torch.all(tensor1[accelerator.state.process_index + 2 :] == 0):
        error_msg += "Padding was not done with the right value (0)."

    tensor2 = accelerator.pad_across_processes(tensor, pad_first=True)
    if tensor2.shape[0] != accelerator.state.num_processes + 1:
        error_msg += f"Found shape {tensor2.shape} but should have {accelerator.state.num_processes + 1} at dim 0."
    index = accelerator.state.num_processes - accelerator.state.process_index - 1
    if not torch.equal(tensor2[index:], tensor):
        error_msg += "Tensors have different values."
    if not torch.all(tensor2[:index] == 0):
        error_msg += "Padding was not done with the right value (0)."

    # Raise error at the end to make sure we don't stop at the first failure.
    if len(error_msg) > 0:
        raise ValueError(error_msg)
| 282
|
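`pad_across_processes` exists because `gather` requires identical shapes on every rank; each process pads its tensor up to the global maximum along the first dimension. A single-process analogue of what happens per rank:

import torch

tensors = [torch.ones(2, 3), torch.ones(4, 3)]  # per-process shapes differ at dim 0
max_len = max(t.shape[0] for t in tensors)

padded = []
for t in tensors:
    pad = torch.zeros(max_len - t.shape[0], *t.shape[1:], dtype=t.dtype)
    padded.append(torch.cat([t, pad], dim=0))      # pad at the end (default)
    # pad_first=True would instead be torch.cat([pad, t], dim=0)

print([p.shape for p in padded])  # both torch.Size([4, 3])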
import unittest
from typing import Dict, List, Optional, Union
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import BridgeTowerImageProcessor
class BridgeTowerImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        do_resize: bool = True,
        size: Dict[str, int] = None,
        size_divisor: int = 32,
        do_rescale: bool = True,
        rescale_factor: Union[int, float] = 1 / 255,
        do_normalize: bool = True,
        do_center_crop: bool = True,
        image_mean: Optional[Union[float, List[float]]] = [0.48145466, 0.4578275, 0.40821073],
        image_std: Optional[Union[float, List[float]]] = [0.26862954, 0.26130258, 0.27577711],
        do_pad: bool = True,
        batch_size=7,
        min_resolution=30,
        max_resolution=400,
        num_channels=3,
    ):
        self.parent = parent
        self.do_resize = do_resize
        self.size = size if size is not None else {"shortest_edge": 288}
        self.size_divisor = size_divisor
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.do_center_crop = do_center_crop
        self.image_mean = image_mean
        self.image_std = image_std
        self.do_pad = do_pad
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution

    def prepare_image_processor_dict(self):
        return {
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "do_normalize": self.do_normalize,
            "do_resize": self.do_resize,
            "size": self.size,
            "size_divisor": self.size_divisor,
        }

    def get_expected_values(self, image_inputs, batched=False):
        if not batched:
            size = self.size["shortest_edge"]
            image = image_inputs[0]
            if isinstance(image, Image.Image):
                w, h = image.size
            else:
                h, w = image.shape[1], image.shape[2]
            scale = size / min(w, h)
            if h < w:
                newh, neww = size, scale * w
            else:
                newh, neww = scale * h, size

            max_size = int((1333 / 800) * size)
            if max(newh, neww) > max_size:
                scale = max_size / max(newh, neww)
                newh = newh * scale
                neww = neww * scale

            newh, neww = int(newh + 0.5), int(neww + 0.5)
            expected_height, expected_width = (
                newh // self.size_divisor * self.size_divisor,
                neww // self.size_divisor * self.size_divisor,
            )
        else:
            expected_values = []
            for image in image_inputs:
                expected_height, expected_width = self.get_expected_values([image])
                expected_values.append((expected_height, expected_width))
            expected_height = max(expected_values, key=lambda item: item[0])[0]
            expected_width = max(expected_values, key=lambda item: item[1])[1]

        return expected_height, expected_width


@require_torch
@require_vision
class BridgeTowerImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = BridgeTowerImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = BridgeTowerImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))
        self.assertTrue(hasattr(image_processing, "size_divisor"))

    def test_batch_feature(self):
        pass

    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values

        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)
        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processor_tester.num_channels, expected_height, expected_width),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values

        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )

    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values

        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)
        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processor_tester.num_channels, expected_height, expected_width),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values

        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )

    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values

        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)
        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processor_tester.num_channels, expected_height, expected_width),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values

        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )
| 282
| 1
|
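The resize rule tested above scales the shortest edge to `size`, caps the longest edge at `1333/800 * size`, and snaps both sides down to a multiple of `size_divisor`. The same computation as a standalone helper (a mirror of `get_expected_values`, not the processor's own API):

def bridgetower_resize(h: int, w: int, shortest_edge: int = 288, size_divisor: int = 32) -> tuple[int, int]:
    """Scale shortest side, cap longest side, snap to divisor."""
    scale = shortest_edge / min(h, w)
    newh, neww = (shortest_edge, scale * w) if h < w else (scale * h, shortest_edge)
    max_size = int((1333 / 800) * shortest_edge)
    if max(newh, neww) > max_size:
        rescale = max_size / max(newh, neww)
        newh, neww = newh * rescale, neww * rescale
    newh, neww = int(newh + 0.5), int(neww + 0.5)
    return newh // size_divisor * size_divisor, neww // size_divisor * size_divisor


print(bridgetower_resize(480, 640))  # (288, 384)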
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
'configuration_electra': ['ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP', 'ElectraConfig', 'ElectraOnnxConfig'],
'tokenization_electra': ['ElectraTokenizer'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_electra_fast"] = ["ElectraTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_electra"] = [
'ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST',
'ElectraForCausalLM',
'ElectraForMaskedLM',
'ElectraForMultipleChoice',
'ElectraForPreTraining',
'ElectraForQuestionAnswering',
'ElectraForSequenceClassification',
'ElectraForTokenClassification',
'ElectraModel',
'ElectraPreTrainedModel',
'load_tf_weights_in_electra',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_electra"] = [
'TF_ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFElectraForMaskedLM',
'TFElectraForMultipleChoice',
'TFElectraForPreTraining',
'TFElectraForQuestionAnswering',
'TFElectraForSequenceClassification',
'TFElectraForTokenClassification',
'TFElectraModel',
'TFElectraPreTrainedModel',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_electra"] = [
'FlaxElectraForCausalLM',
'FlaxElectraForMaskedLM',
'FlaxElectraForMultipleChoice',
'FlaxElectraForPreTraining',
'FlaxElectraForQuestionAnswering',
'FlaxElectraForSequenceClassification',
'FlaxElectraForTokenClassification',
'FlaxElectraModel',
'FlaxElectraPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_electra import ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP, ElectraConfig, ElectraOnnxConfig
from .tokenization_electra import ElectraTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_electra_fast import ElectraTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_electra import (
ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST,
ElectraForCausalLM,
ElectraForMaskedLM,
ElectraForMultipleChoice,
ElectraForPreTraining,
ElectraForQuestionAnswering,
ElectraForSequenceClassification,
ElectraForTokenClassification,
ElectraModel,
ElectraPreTrainedModel,
load_tf_weights_in_electra,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_electra import (
TF_ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST,
TFElectraForMaskedLM,
TFElectraForMultipleChoice,
TFElectraForPreTraining,
TFElectraForQuestionAnswering,
TFElectraForSequenceClassification,
TFElectraForTokenClassification,
TFElectraModel,
TFElectraPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_electra import (
FlaxElectraForCausalLM,
FlaxElectraForMaskedLM,
FlaxElectraForMultipleChoice,
FlaxElectraForPreTraining,
FlaxElectraForQuestionAnswering,
FlaxElectraForSequenceClassification,
FlaxElectraForTokenClassification,
FlaxElectraModel,
FlaxElectraPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 144
|
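`_LazyModule` defers the heavy framework imports until an attribute is first accessed. A generic, stdlib-only sketch of the pattern (`LazyModule` here is a simplified stand-in, not the transformers implementation):

import importlib
import types


class LazyModule(types.ModuleType):
    """Simplified stand-in for the lazy-import pattern."""

    def __init__(self, name, import_structure):
        super().__init__(name)
        # map every exported attribute back to the module that defines it
        self._name_to_module = {attr: mod for mod, attrs in import_structure.items() for attr in attrs}

    def __getattr__(self, attr):
        if attr not in self._name_to_module:
            raise AttributeError(f"module {self.__name__!r} has no attribute {attr!r}")
        module = importlib.import_module(self._name_to_module[attr])  # imported only on first access
        value = getattr(module, attr)
        setattr(self, attr, value)  # cache so later lookups skip __getattr__
        return value


lazy = LazyModule("demo", {"json": ["dumps"], "math": ["sqrt"]})
print(lazy.sqrt(16.0))  # 4.0 -- `math` is imported only at this point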
from __future__ import annotations
import random
# Maximum size of the population. Bigger could be faster but is more memory expensive.
__lowercase = 200
# Number of elements selected in every generation of evolution. The selection takes
# place from best to worst of that generation and must be smaller than N_POPULATION.
__lowercase = 50
# Probability that an element of a generation can mutate, changing one of its genes.
# This will guarantee that all genes will be used during evolution.
__lowercase = 0.4
# Just a seed to improve randomness required by the algorithm.
random.seed(random.randint(0, 1000))
def evaluate(item: str, main_target: str) -> tuple[str, float]:
    """Evaluate how similar the item is to the target by counting each char in the right position."""
    score = len([g for position, g in enumerate(item) if g == main_target[position]])
    return (item, float(score))
def crossover(parent_1: str, parent_2: str) -> tuple[str, str]:
    """Slice and combine two strings at a random point."""
    random_slice = random.randint(0, len(parent_1) - 1)
    child_1 = parent_1[:random_slice] + parent_2[random_slice:]
    child_2 = parent_2[:random_slice] + parent_1[random_slice:]
    return (child_1, child_2)
def mutate(child: str, genes: list[str]) -> str:
    """Mutate a random gene of a child with another one from the list."""
    child_list = list(child)
    if random.uniform(0, 1) < MUTATION_PROBABILITY:
        child_list[random.randint(0, len(child)) - 1] = random.choice(genes)
    return "".join(child_list)
def lowerCamelCase ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , ):
'''simple docstring'''
__UpperCamelCase :int = []
# Generate more children proportionally to the fitness score.
__UpperCamelCase :int = int(parent_a[1] * 100 ) + 1
__UpperCamelCase :List[str] = 10 if child_n >= 10 else child_n
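    # Worked example (illustrative): a parent with normalized score 0.05 yields
    # int(0.05 * 100) + 1 = 6 crossover rounds, while scores of 0.09 and above
    # hit the cap of 10 rounds; each round appends two mutated children below.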
for _ in range(SCREAMING_SNAKE_CASE ):
__UpperCamelCase :Optional[Any] = population_score[random.randint(0 , SCREAMING_SNAKE_CASE )][0]
__UpperCamelCase , __UpperCamelCase :Any = crossover(parent_a[0] , SCREAMING_SNAKE_CASE )
        # Append the two mutated children to the population list.
pop.append(mutate(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) )
pop.append(mutate(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) )
return pop
def lowerCamelCase ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = True ):
'''simple docstring'''
if N_POPULATION < N_SELECTED:
__UpperCamelCase :List[Any] = f"""{N_POPULATION} must be bigger than {N_SELECTED}"""
raise ValueError(SCREAMING_SNAKE_CASE )
    # Verify that the target contains no genes besides the ones in the genes list.
__UpperCamelCase :List[str] = sorted({c for c in target if c not in genes} )
if not_in_genes_list:
__UpperCamelCase :Optional[int] = f"""{not_in_genes_list} is not in genes list, evolution cannot converge"""
raise ValueError(SCREAMING_SNAKE_CASE )
# Generate random starting population.
__UpperCamelCase :int = []
for _ in range(SCREAMING_SNAKE_CASE ):
population.append(''''''.join([random.choice(SCREAMING_SNAKE_CASE ) for i in range(len(SCREAMING_SNAKE_CASE ) )] ) )
    # Counters used to log what the algorithm is doing.
__UpperCamelCase , __UpperCamelCase :List[Any] = 0, 0
# This loop will end when we find a perfect match for our target.
while True:
generation += 1
total_population += len(SCREAMING_SNAKE_CASE )
# Random population created. Now it's time to evaluate.
# Adding a bit of concurrency can make everything faster,
#
# import concurrent.futures
# population_score: list[tuple[str, float]] = []
# with concurrent.futures.ThreadPoolExecutor(
# max_workers=NUM_WORKERS) as executor:
# futures = {executor.submit(evaluate, item) for item in population}
# concurrent.futures.wait(futures)
# population_score = [item.result() for item in futures]
#
# but with a simple algorithm like this, it will probably be slower.
# We just need to call evaluate for every item inside the population.
__UpperCamelCase :Tuple = [evaluate(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) for item in population]
# Check if there is a matching evolution.
__UpperCamelCase :Tuple = sorted(SCREAMING_SNAKE_CASE , key=lambda SCREAMING_SNAKE_CASE : x[1] , reverse=SCREAMING_SNAKE_CASE )
if population_score[0][0] == target:
return (generation, total_population, population_score[0][0])
        # Print the best result every 10 generations,
        # just to show that the algorithm is working.
if debug and generation % 10 == 0:
print(
f"""\nGeneration: {generation}"""
f"""\nTotal Population:{total_population}"""
f"""\nBest score: {population_score[0][1]}"""
f"""\nBest string: {population_score[0][0]}""" )
        # Flush the old population, keeping some of the best evolutions.
        # Keeping these avoids regression of the evolution.
__UpperCamelCase :str = population[: int(N_POPULATION / 3 )]
population.clear()
population.extend(SCREAMING_SNAKE_CASE )
# Normalize population score to be between 0 and 1.
__UpperCamelCase :Union[str, Any] = [
(item, score / len(SCREAMING_SNAKE_CASE )) for item, score in population_score
]
        # This is the selection step.
for i in range(SCREAMING_SNAKE_CASE ):
population.extend(select(population_score[int(SCREAMING_SNAKE_CASE )] , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) )
        # Check if the population has already reached its maximum size and, if so,
        # break the cycle. If this check is disabled, the algorithm will take
        # forever to compute large strings, but will also compute small strings in
        # far fewer generations.
if len(SCREAMING_SNAKE_CASE ) > N_POPULATION:
break
if __name__ == "__main__":
__lowercase = (
'''This is a genetic algorithm to evaluate, combine, evolve, and mutate a string!'''
)
__lowercase = list(
''' ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklm'''
'''nopqrstuvwxyz.,;!?+-*#@^\'èéòà€ù=)(&%$£/\\'''
)
__lowercase , __lowercase , __lowercase = basic(target_str, genes_list)
print(
F'\nGeneration: {generation}\nTotal Population: {population}\nTarget: {target}'
)
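    # A minimal usage sketch (hypothetical target and gene list; `basic` is the
    # evolution entry point called above):
    #   generation, total_population, best = basic("HELLO WORLD", list(" DEHLORW"))
    #   print(best)  # -> "HELLO WORLD"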
| 43
| 0
|
"""simple docstring"""
import argparse
import pickle
import numpy as np
import torch
from torch import nn
from transformers import ReformerConfig, ReformerModelWithLMHead
from transformers.utils import logging
logging.set_verbosity_info()
def lowerCAmelCase__ ( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__=None ):
'''simple docstring'''
assert torch_layer.weight.shape == weight.shape, F"""{torch_layer} layer.weight does not match"""
_a : Optional[Any] = nn.Parameter(UpperCamelCase__ )
if bias is not None:
assert torch_layer.bias.shape == bias.shape, F"""{torch_layer} layer.bias does not match"""
_a : Optional[int] = nn.Parameter(UpperCamelCase__ )
def lowerCAmelCase__ ( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ):
'''simple docstring'''
_a : Any = np.asarray(weights[0] )
_a : str = np.asarray(weights[1] )
_a : List[str] = np.asarray(weights[2] )
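    # The trax weights carry a separate per-head axis; the transpose + flatten
    # below collapse it into the 2D matrix layout that set_param assigns to the
    # corresponding torch layer.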
set_param(
torch_layer.self_attention.query_key , torch.tensor(UpperCamelCase__ ).transpose(1 , 2 ).contiguous().view(-1 , UpperCamelCase__ ) , )
set_param(
torch_layer.self_attention.value , torch.tensor(UpperCamelCase__ ).transpose(1 , 2 ).contiguous().view(-1 , UpperCamelCase__ ) , )
set_param(
torch_layer.output.dense , torch.tensor(UpperCamelCase__ ).view(-1 , UpperCamelCase__ ).contiguous().transpose(0 , 1 ) , )
def lowerCAmelCase__ ( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ):
'''simple docstring'''
_a : List[Any] = np.asarray(weights[0] )
_a : Dict = np.asarray(weights[1] )
_a : Any = np.asarray(weights[2] )
_a : List[Any] = np.asarray(weights[3] )
set_param(
torch_layer.self_attention.query , torch.tensor(UpperCamelCase__ ).transpose(1 , 2 ).contiguous().view(-1 , UpperCamelCase__ ) , )
set_param(
torch_layer.self_attention.key , torch.tensor(UpperCamelCase__ ).transpose(1 , 2 ).contiguous().view(-1 , UpperCamelCase__ ) , )
set_param(
torch_layer.self_attention.value , torch.tensor(UpperCamelCase__ ).transpose(1 , 2 ).contiguous().view(-1 , UpperCamelCase__ ) , )
set_param(
torch_layer.output.dense , torch.tensor(UpperCamelCase__ ).view(-1 , UpperCamelCase__ ).contiguous().transpose(0 , 1 ) , )
def lowerCAmelCase__ ( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ):
'''simple docstring'''
_a : Any = weights[0][0][0]
_a : str = np.asarray(layer_norm_a[0] )
_a : int = np.asarray(layer_norm_a[1] )
set_param(
torch_block.attention.layer_norm , torch.tensor(UpperCamelCase__ ) , torch.tensor(UpperCamelCase__ ) , )
# lsh weights + output
_a : Optional[int] = weights[0][1]
if len(UpperCamelCase__ ) < 4:
set_layer_weights_in_torch_lsh(UpperCamelCase__ , torch_block.attention , UpperCamelCase__ )
else:
set_layer_weights_in_torch_local(UpperCamelCase__ , torch_block.attention , UpperCamelCase__ )
    # intermediate weights
_a : int = weights[2][0][1][2]
# Chunked Feed Forward
if len(UpperCamelCase__ ) == 4:
_a : int = intermediate_weights[2]
# layernorm 2
_a : List[str] = np.asarray(intermediate_weights[0][0] )
_a : Any = np.asarray(intermediate_weights[0][1] )
set_param(
torch_block.feed_forward.layer_norm , torch.tensor(UpperCamelCase__ ) , torch.tensor(UpperCamelCase__ ) , )
# intermediate dense
_a : Optional[Any] = np.asarray(intermediate_weights[1][0] )
_a : Tuple = np.asarray(intermediate_weights[1][1] )
set_param(
torch_block.feed_forward.dense.dense , torch.tensor(UpperCamelCase__ ).transpose(0 , 1 ).contiguous() , torch.tensor(UpperCamelCase__ ) , )
# intermediate out
_a : Optional[Any] = np.asarray(intermediate_weights[4][0] )
_a : List[str] = np.asarray(intermediate_weights[4][1] )
set_param(
torch_block.feed_forward.output.dense , torch.tensor(UpperCamelCase__ ).transpose(0 , 1 ).contiguous() , torch.tensor(UpperCamelCase__ ) , )
def lowerCAmelCase__ ( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ):
'''simple docstring'''
_a : int = torch_model.reformer
# word embeds
_a : Any = np.asarray(weights[1] )
set_param(
torch_model_reformer.embeddings.word_embeddings , torch.tensor(UpperCamelCase__ ) , )
if isinstance(weights[3] , UpperCamelCase__ ):
_a : Union[str, Any] = torch_model_reformer.embeddings.position_embeddings
for emb_idx in range(len(position_embeddings.weights ) ):
_a : List[str] = np.asarray(weights[3][emb_idx][0] )
assert (
position_embeddings.weights[emb_idx].shape == emb_weights.shape
), F"""{position_embeddings[emb_idx]} emb does not match"""
_a : List[str] = nn.Parameter(torch.tensor(UpperCamelCase__ ) )
_a : str = weights[5]
assert len(torch_model_reformer.encoder.layers ) * 4 == len(
UpperCamelCase__ ), "HF and trax model do not have the same number of layers"
for layer_idx, layer in enumerate(torch_model_reformer.encoder.layers ):
_a : int = trax_layer_weights[4 * layer_idx : 4 * (layer_idx + 1)]
set_block_weights_in_torch(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
# output layer norm
_a : Optional[int] = np.asarray(weights[7][0] )
_a : Dict = np.asarray(weights[7][1] )
set_param(
torch_model_reformer.encoder.layer_norm , torch.tensor(UpperCamelCase__ ) , torch.tensor(UpperCamelCase__ ) , )
# output embeddings
_a : List[str] = np.asarray(weights[9][0] )
_a : str = np.asarray(weights[9][1] )
set_param(
torch_model.lm_head.decoder , torch.tensor(UpperCamelCase__ ).transpose(0 , 1 ).contiguous() , torch.tensor(UpperCamelCase__ ) , )
def lowerCAmelCase__ ( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ):
'''simple docstring'''
_a : List[Any] = ReformerConfig.from_json_file(UpperCamelCase__ )
print(F"""Building PyTorch model from configuration: {config}""" )
_a : Any = ReformerModelWithLMHead(UpperCamelCase__ )
with open(UpperCamelCase__ , """rb""" ) as f:
_a : Union[str, Any] = pickle.load(UpperCamelCase__ )["""weights"""]
set_model_weights_in_torch(UpperCamelCase__ , UpperCamelCase__ , config.hidden_size )
# Save pytorch-model
print(F"""Save PyTorch model to {pytorch_dump_path}""" )
torch.save(model.state_dict() , UpperCamelCase__ )
if __name__ == "__main__":
_snake_case = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
        '--trax_model_pkl_path', default=None, type=str, required=True, help='Path to the trax model pickle (.pkl) file.'
)
parser.add_argument(
'--config_file',
default=None,
type=str,
required=True,
help=(
'The config json file corresponding to the pre-trained Reformer model. \n'
'This specifies the model architecture.'
),
)
parser.add_argument(
'--pytorch_dump_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
_snake_case = parser.parse_args()
convert_trax_checkpoint_to_pytorch(args.trax_model_pkl_path, args.config_file, args.pytorch_dump_path)
| 365
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
_snake_case = {
'configuration_vision_encoder_decoder': ['VisionEncoderDecoderConfig', 'VisionEncoderDecoderOnnxConfig']
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_snake_case = ['VisionEncoderDecoderModel']
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_snake_case = ['TFVisionEncoderDecoderModel']
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_snake_case = ['FlaxVisionEncoderDecoderModel']
if TYPE_CHECKING:
from .configuration_vision_encoder_decoder import VisionEncoderDecoderConfig, VisionEncoderDecoderOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vision_encoder_decoder import VisionEncoderDecoderModel
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_vision_encoder_decoder import TFVisionEncoderDecoderModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_vision_encoder_decoder import FlaxVisionEncoderDecoderModel
else:
import sys
_snake_case = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 324
| 0
|
from json import JSONDecodeError # Workaround for requests.exceptions.JSONDecodeError
import requests
def A ( _UpperCAmelCase : str = "isbn/0140328726" ) -> dict:
'''simple docstring'''
_UpperCAmelCase = olid.strip().strip('/' ) # Remove leading/trailing whitespace & slashes
if new_olid.count('/' ) != 1:
_UpperCAmelCase = F"{olid} is not a valid Open Library olid"
raise ValueError(_UpperCAmelCase )
return requests.get(F"https://openlibrary.org/{new_olid}.json" ).json()
def A ( _UpperCAmelCase : dict ) -> dict:
'''simple docstring'''
_UpperCAmelCase = {
'title': 'Title',
'publish_date': 'Publish date',
'authors': 'Authors',
        'number_of_pages': 'Number of pages',
'first_sentence': 'First sentence',
'isbn_10': 'ISBN (10)',
'isbn_13': 'ISBN (13)',
}
_UpperCAmelCase = {better_key: ol_book_data[key] for key, better_key in desired_keys.items()}
_UpperCAmelCase = [
get_openlibrary_data(author['key'] )['name'] for author in data['Authors']
]
_UpperCAmelCase = data['First sentence']['value']
for key, value in data.items():
if isinstance(_UpperCAmelCase , _UpperCAmelCase ):
_UpperCAmelCase = ', '.join(_UpperCAmelCase )
return data
if __name__ == "__main__":
import doctest
doctest.testmod()
while True:
UpperCAmelCase__ = input("\nEnter the ISBN code to search (or 'quit' to stop): ").strip()
if isbn.lower() in ("", "q", "quit", "exit", "stop"):
break
if len(isbn) not in (10, 13) or not isbn.isdigit():
print(f"""Sorry, {isbn} is not a valid ISBN. Please, input a valid ISBN.""")
continue
print(f"""\nSearching Open Library for ISBN: {isbn}...\n""")
try:
UpperCAmelCase__ = summarize_book(get_openlibrary_data(f"""isbn/{isbn}"""))
print("\n".join(f"""{key}: {value}""" for key, value in book_summary.items()))
except JSONDecodeError: # Workaround for requests.exceptions.RequestException:
print(f"""Sorry, there are no results for ISBN: {isbn}.""")
| 339
|
from typing import Dict, List
from nltk.translate import gleu_score
import datasets
from datasets import MetricInfo
UpperCAmelCase__ = "\\n@misc{wu2016googles,\n title={Google's Neural Machine Translation System: Bridging the Gap between Human and Machine Translation},\n author={Yonghui Wu and Mike Schuster and Zhifeng Chen and Quoc V. Le and Mohammad Norouzi and Wolfgang Macherey\n and Maxim Krikun and Yuan Cao and Qin Gao and Klaus Macherey and Jeff Klingner and Apurva Shah and Melvin\n Johnson and Xiaobing Liu and Łukasz Kaiser and Stephan Gouws and Yoshikiyo Kato and Taku Kudo and Hideto\n Kazawa and Keith Stevens and George Kurian and Nishant Patil and Wei Wang and Cliff Young and\n Jason Smith and Jason Riesa and Alex Rudnick and Oriol Vinyals and Greg Corrado and Macduff Hughes\n and Jeffrey Dean},\n year={2016},\n eprint={1609.08144},\n archivePrefix={arXiv},\n primaryClass={cs.CL}\n}\n"
UpperCAmelCase__ = "\\nThe BLEU score has some undesirable properties when used for single\nsentences, as it was designed to be a corpus measure. We therefore\nuse a slightly different score for our RL experiments which we call\nthe 'GLEU score'. For the GLEU score, we record all sub-sequences of\n1, 2, 3 or 4 tokens in output and target sequence (n-grams). We then\ncompute a recall, which is the ratio of the number of matching n-grams\nto the number of total n-grams in the target (ground truth) sequence,\nand a precision, which is the ratio of the number of matching n-grams\nto the number of total n-grams in the generated output sequence. Then\nGLEU score is simply the minimum of recall and precision. This GLEU\nscore's range is always between 0 (no matches) and 1 (all match) and\nit is symmetrical when switching output and target. According to\nour experiments, GLEU score correlates quite well with the BLEU\nmetric on a corpus level but does not have its drawbacks for our per\nsentence reward objective.\n"
UpperCAmelCase__ = "\\nComputes corpus-level Google BLEU (GLEU) score of translated segments against one or more references.\nInstead of averaging the sentence level GLEU scores (i.e. macro-average precision), Wu et al. (2016) sum up the matching\ntokens and the max of hypothesis and reference tokens for each sentence, then compute using the aggregate values.\n\nArgs:\n predictions (list of str): list of translations to score.\n Each translation should be tokenized into a list of tokens.\n references (list of list of str): list of lists of references for each translation.\n Each reference should be tokenized into a list of tokens.\n min_len (int): The minimum order of n-gram this function should extract. Defaults to 1.\n max_len (int): The maximum order of n-gram this function should extract. Defaults to 4.\n\nReturns:\n 'google_bleu': google_bleu score\n\nExamples:\n Example 1:\n >>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',\n ... 'ensures', 'that', 'the', 'rubber', 'duck', 'always',\n ... 'disobeys', 'the', 'commands', 'of', 'the', 'cat']\n >>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',\n ... 'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',\n ... 'being', 'under', 'the', 'command', 'of', 'the', 'cat']\n\n >>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',\n ... 'interested', 'in', 'world', 'history']\n >>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',\n ... 'because', 'he', 'read', 'the', 'book']\n\n >>> list_of_references = [[ref1a], [ref2a]]\n >>> hypotheses = [hyp1, hyp2]\n >>> google_bleu = datasets.load_metric(\"google_bleu\")\n >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references)\n >>> print(round(results[\"google_bleu\"], 2))\n 0.44\n\n Example 2:\n >>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',\n ... 'ensures', 'that', 'the', 'rubber', 'duck', 'always',\n ... 'disobeys', 'the', 'commands', 'of', 'the', 'cat']\n >>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',\n ... 'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',\n ... 'being', 'under', 'the', 'command', 'of', 'the', 'cat']\n >>> ref1b = ['It', 'is', 'a', 'guide', 'to', 'action', 'that',\n ... 'ensures', 'that', 'the', 'rubber', 'duck', 'will', 'never',\n ... 'heed', 'the', 'cat', 'commands']\n >>> ref1c = ['It', 'is', 'the', 'practical', 'guide', 'for', 'the',\n ... 'rubber', 'duck', 'army', 'never', 'to', 'heed', 'the', 'directions',\n ... 'of', 'the', 'cat']\n\n >>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',\n ... 'interested', 'in', 'world', 'history']\n >>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',\n ... 'because', 'he', 'read', 'the', 'book']\n\n >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]\n >>> hypotheses = [hyp1, hyp2]\n >>> google_bleu = datasets.load_metric(\"google_bleu\")\n >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references)\n >>> print(round(results[\"google_bleu\"], 2))\n 0.61\n\n Example 3:\n >>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',\n ... 'ensures', 'that', 'the', 'rubber', 'duck', 'always',\n ... 'disobeys', 'the', 'commands', 'of', 'the', 'cat']\n >>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',\n ... 'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',\n ... 'being', 'under', 'the', 'command', 'of', 'the', 'cat']\n >>> ref1b = ['It', 'is', 'a', 'guide', 'to', 'action', 'that',\n ... 
'ensures', 'that', 'the', 'rubber', 'duck', 'will', 'never',\n ... 'heed', 'the', 'cat', 'commands']\n >>> ref1c = ['It', 'is', 'the', 'practical', 'guide', 'for', 'the',\n ... 'rubber', 'duck', 'army', 'never', 'to', 'heed', 'the', 'directions',\n ... 'of', 'the', 'cat']\n\n >>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',\n ... 'interested', 'in', 'world', 'history']\n >>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',\n ... 'because', 'he', 'read', 'the', 'book']\n\n >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]\n >>> hypotheses = [hyp1, hyp2]\n >>> google_bleu = datasets.load_metric(\"google_bleu\")\n >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references, min_len=2)\n >>> print(round(results[\"google_bleu\"], 2))\n 0.53\n\n Example 4:\n >>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',\n ... 'ensures', 'that', 'the', 'rubber', 'duck', 'always',\n ... 'disobeys', 'the', 'commands', 'of', 'the', 'cat']\n >>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',\n ... 'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',\n ... 'being', 'under', 'the', 'command', 'of', 'the', 'cat']\n >>> ref1b = ['It', 'is', 'a', 'guide', 'to', 'action', 'that',\n ... 'ensures', 'that', 'the', 'rubber', 'duck', 'will', 'never',\n ... 'heed', 'the', 'cat', 'commands']\n >>> ref1c = ['It', 'is', 'the', 'practical', 'guide', 'for', 'the',\n ... 'rubber', 'duck', 'army', 'never', 'to', 'heed', 'the', 'directions',\n ... 'of', 'the', 'cat']\n\n >>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',\n ... 'interested', 'in', 'world', 'history']\n >>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',\n ... 'because', 'he', 'read', 'the', 'book']\n\n >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]\n >>> hypotheses = [hyp1, hyp2]\n >>> google_bleu = datasets.load_metric(\"google_bleu\")\n >>> results = google_bleu.compute(predictions=hypotheses,references=list_of_references, min_len=2, max_len=6)\n >>> print(round(results[\"google_bleu\"], 2))\n 0.4\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class __lowerCAmelCase ( datasets.Metric ):
def _lowerCamelCase ( self : str) -> MetricInfo:
"""simple docstring"""
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'predictions': datasets.Sequence(datasets.Value('string' , id='token') , id='sequence'),
'references': datasets.Sequence(
datasets.Sequence(datasets.Value('string' , id='token') , id='sequence') , id='references'),
}) , )
def _lowerCamelCase ( self : Union[str, Any] , A : List[List[List[str]]] , A : List[List[str]] , A : int = 1 , A : int = 4 , ) -> Dict[str, float]:
"""simple docstring"""
return {
"google_bleu": gleu_score.corpus_gleu(
list_of_references=A , hypotheses=A , min_len=A , max_len=A)
}
| 339
| 1
|
'''simple docstring'''
import pytest
from datasets.parallel import ParallelBackendConfig, parallel_backend
from datasets.utils.py_utils import map_nested
from .utils import require_dill_gt_0_3_2, require_joblibspark, require_not_windows
def snake_case_ ( __SCREAMING_SNAKE_CASE : List[str] ): # picklable for multiprocessing
"""simple docstring"""
return i + 1
@require_dill_gt_0_3_2
@require_joblibspark
@require_not_windows
def snake_case_ ( ):
"""simple docstring"""
with parallel_backend('''spark''' ):
assert ParallelBackendConfig.backend_name == "spark"
lowercase_ : str = [1, 2, 3]
with pytest.raises(__SCREAMING_SNAKE_CASE ):
with parallel_backend('''unsupported backend''' ):
map_nested(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , num_proc=2 )
with pytest.raises(__SCREAMING_SNAKE_CASE ):
with parallel_backend('''unsupported backend''' ):
map_nested(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , num_proc=-1 )
@require_dill_gt_0_3_2
@require_joblibspark
@require_not_windows
@pytest.mark.parametrize('''num_proc''' , [2, -1] )
def snake_case_ ( __SCREAMING_SNAKE_CASE : Dict ):
"""simple docstring"""
lowercase_ : List[str] = [1, 2]
lowercase_ : Any = {'''a''': 1, '''b''': 2}
lowercase_ : Dict = {'''a''': [1, 2], '''b''': [3, 4]}
lowercase_ : Any = {'''a''': {'''1''': 1}, '''b''': 2}
lowercase_ : List[str] = {'''a''': 1, '''b''': 2, '''c''': 3, '''d''': 4}
lowercase_ : int = [2, 3]
lowercase_ : Optional[int] = {'''a''': 2, '''b''': 3}
lowercase_ : Optional[Any] = {'''a''': [2, 3], '''b''': [4, 5]}
lowercase_ : Union[str, Any] = {'''a''': {'''1''': 2}, '''b''': 3}
lowercase_ : Optional[Any] = {'''a''': 2, '''b''': 3, '''c''': 4, '''d''': 5}
with parallel_backend('''spark''' ):
assert map_nested(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , num_proc=__SCREAMING_SNAKE_CASE ) == expected_map_nested_sa
assert map_nested(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , num_proc=__SCREAMING_SNAKE_CASE ) == expected_map_nested_sa
assert map_nested(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , num_proc=__SCREAMING_SNAKE_CASE ) == expected_map_nested_sa
assert map_nested(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , num_proc=__SCREAMING_SNAKE_CASE ) == expected_map_nested_sa
assert map_nested(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , num_proc=__SCREAMING_SNAKE_CASE ) == expected_map_nested_sa
| 264
|
'''simple docstring'''
import logging
from dataclasses import dataclass, field
from pathlib import Path
from typing import Optional, Union
from .generation.configuration_utils import GenerationConfig
from .training_args import TrainingArguments
from .utils import add_start_docstrings
_lowercase : Tuple = logging.getLogger(__name__)
@dataclass
@add_start_docstrings(TrainingArguments.__doc__ )
class lowerCAmelCase__ ( lowerCamelCase_ ):
lowerCAmelCase_ = field(default=lowerCamelCase_ , metadata={'''help''': '''Whether to use SortishSampler or not.'''} )
lowerCAmelCase_ = field(
default=lowerCamelCase_ , metadata={'''help''': '''Whether to use generate to calculate generative metrics (ROUGE, BLEU).'''} )
lowerCAmelCase_ = field(
default=lowerCamelCase_ , metadata={
'''help''': (
'''The `max_length` to use on each evaluation loop when `predict_with_generate=True`. Will default '''
'''to the `max_length` value of the model configuration.'''
)
} , )
lowerCAmelCase_ = field(
default=lowerCamelCase_ , metadata={
'''help''': (
'''The `num_beams` to use on each evaluation loop when `predict_with_generate=True`. Will default '''
'''to the `num_beams` value of the model configuration.'''
)
} , )
lowerCAmelCase_ = field(
default=lowerCamelCase_ , metadata={
'''help''': '''Model id, file path or url pointing to a GenerationConfig json file, to use during prediction.'''
} , )
def _snake_case ( self ):
"""simple docstring"""
lowercase_ : Dict = super().to_dict()
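        # Replace any nested GenerationConfig value with its dict form so the
        # serialized arguments stay JSON-compatible (e.g. for logging callbacks).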
for k, v in d.items():
if isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
lowercase_ : str = v.to_dict()
return d
| 264
| 1
|
"""simple docstring"""
def __UpperCAmelCase ( __lowerCamelCase ) -> int:
lowercase__ : Optional[Any] = 1
for i in range(1 , num + 1 ):
fact *= i
return fact
def __UpperCAmelCase ( __lowerCamelCase ) -> int:
lowercase__ : Optional[int] = 0
while number > 0:
lowercase__ : List[Any] = number % 10
sum_of_digits += last_digit
lowercase__ : List[str] = number // 10 # Removing the last_digit from the given number
return sum_of_digits
def __UpperCAmelCase ( __lowerCamelCase = 1_00 ) -> int:
lowercase__ : Any = factorial(__lowerCamelCase )
lowercase__ : Dict = split_and_add(__lowerCamelCase )
return result
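# Worked example: factorial(10) = 3628800 and split_and_add(3628800) returns
# 3 + 6 + 2 + 8 + 8 + 0 + 0 = 27, so solution(10) == 27.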
if __name__ == "__main__":
print(solution(int(input('Enter the Number: ').strip())))
| 16
|
from abc import ABC, abstractmethod
from argparse import ArgumentParser
class snake_case_ (lowerCamelCase_ ):
@staticmethod
@abstractmethod
def lowerCamelCase__( __snake_case :ArgumentParser ) -> Dict:
raise NotImplementedError()
@abstractmethod
def lowerCamelCase__( self :Union[str, Any] ) -> Dict:
raise NotImplementedError()
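# A minimal subclass sketch (illustrative; upstream the two abstract hooks are
# named register_subcommand and run):
#
# class EnvCommand(snake_case_):
#     @staticmethod
#     def register_subcommand(parser: ArgumentParser):
#         parser.add_parser("env")
#
#     def run(self):
#         print("environment info")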
| 240
| 0
|
import collections
import tempfile
import unittest
import numpy as np
from transformers.testing_utils import (
is_pt_flax_cross_test,
require_flax,
require_torch,
require_vision,
slow,
torch_device,
)
from transformers.utils import is_flax_available, is_torch_available, is_vision_available
from ...test_modeling_flax_common import floats_tensor, ids_tensor, random_attention_mask
from ..bert.test_modeling_flax_bert import FlaxBertModelTester
from ..clip.test_modeling_flax_clip import FlaxCLIPVisionModelTester
from ..vit.test_modeling_flax_vit import FlaxViTModelTester
if is_flax_available():
from transformers import (
FlaxBertModel,
FlaxCLIPVisionModel,
FlaxVisionTextDualEncoderModel,
FlaxViTModel,
VisionTextDualEncoderConfig,
VisionTextDualEncoderProcessor,
)
from transformers.modeling_flax_pytorch_utils import (
convert_pytorch_state_dict_to_flax,
load_flax_weights_in_pytorch_model,
)
if is_torch_available():
import torch
from transformers import VisionTextDualEncoderModel
if is_vision_available():
from PIL import Image
def lowerCAmelCase( SCREAMING_SNAKE_CASE_ )-> Any:
"""simple docstring"""
if isinstance(SCREAMING_SNAKE_CASE_ , collections.abc.Iterable ):
return x
return (x, x)
@require_flax
class __magic_name__ :
def UpperCAmelCase_ ( self , _lowercase , _lowercase )-> List[str]:
pass
def UpperCAmelCase_ ( self )-> Optional[int]:
pass
def UpperCAmelCase_ ( self )-> Dict:
pass
def UpperCAmelCase_ ( self , _lowercase , _lowercase , _lowercase )-> Optional[Any]:
UpperCamelCase_ = np.abs((a - b) ).max()
self.assertLessEqual(_lowercase , _lowercase , F"Difference between torch and flax is {diff} (>= {tol})." )
def UpperCAmelCase_ ( self , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase=None , **_lowercase )-> List[Any]:
UpperCamelCase_ = VisionTextDualEncoderConfig.from_vision_text_configs(_lowercase , _lowercase )
UpperCamelCase_ = FlaxVisionTextDualEncoderModel(_lowercase )
UpperCamelCase_ = model(input_ids=_lowercase , pixel_values=_lowercase , attention_mask=_lowercase )
self.assertEqual(output["text_embeds"].shape , (input_ids.shape[0], config.projection_dim) )
self.assertEqual(output["image_embeds"].shape , (pixel_values.shape[0], config.projection_dim) )
def UpperCAmelCase_ ( self , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase=None , **_lowercase )-> List[Any]:
UpperCamelCase_ , UpperCamelCase_ = self.get_vision_text_model(_lowercase , _lowercase )
UpperCamelCase_ = {"vision_model": vision_model, "text_model": text_model}
UpperCamelCase_ = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(**_lowercase )
UpperCamelCase_ = model(input_ids=_lowercase , pixel_values=_lowercase , attention_mask=_lowercase )
self.assertEqual(output["text_embeds"].shape , (input_ids.shape[0], model.config.projection_dim) )
self.assertEqual(output["image_embeds"].shape , (pixel_values.shape[0], model.config.projection_dim) )
def UpperCAmelCase_ ( self , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase=None , **_lowercase )-> int:
UpperCamelCase_ , UpperCamelCase_ = self.get_vision_text_model(_lowercase , _lowercase )
UpperCamelCase_ = {"vision_model": vision_model, "text_model": text_model}
UpperCamelCase_ = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(**_lowercase )
UpperCamelCase_ = model(input_ids=_lowercase , pixel_values=_lowercase , attention_mask=_lowercase )
UpperCamelCase_ = output[0]
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(_lowercase )
UpperCamelCase_ = FlaxVisionTextDualEncoderModel.from_pretrained(_lowercase )
UpperCamelCase_ = model(input_ids=_lowercase , pixel_values=_lowercase , attention_mask=_lowercase )
UpperCamelCase_ = after_output[0]
UpperCamelCase_ = np.amax(np.abs(out_a - out_a ) )
self.assertLessEqual(_lowercase , 1e-3 )
def UpperCAmelCase_ ( self , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase=None , **_lowercase )-> Optional[Any]:
UpperCamelCase_ , UpperCamelCase_ = self.get_vision_text_model(_lowercase , _lowercase )
UpperCamelCase_ = {"vision_model": vision_model, "text_model": text_model}
UpperCamelCase_ = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(**_lowercase )
UpperCamelCase_ = model(
input_ids=_lowercase , pixel_values=_lowercase , attention_mask=_lowercase , output_attentions=_lowercase )
UpperCamelCase_ = output.vision_model_output.attentions
self.assertEqual(len(_lowercase ) , vision_config.num_hidden_layers )
# in ViT, the seq_len equals the number of patches + 1 (we add 1 for the [CLS] token)
UpperCamelCase_ = to_atuple(vision_model.config.image_size )
UpperCamelCase_ = to_atuple(vision_model.config.patch_size )
UpperCamelCase_ = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
UpperCamelCase_ = num_patches + 1
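        # Illustrative numbers: image_size (32, 32) with patch_size (4, 4) gives
        # (32 // 4) * (32 // 4) = 64 patches, so seq_len = 65 with the [CLS] token.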
self.assertEqual(vision_attentions[0].shape[-3:] , (vision_config.num_attention_heads, seq_len, seq_len) )
UpperCamelCase_ = output.text_model_output.attentions
self.assertEqual(len(_lowercase ) , text_config.num_hidden_layers )
self.assertEqual(
text_attentions[0].shape[-3:] , (text_config.num_attention_heads, input_ids.shape[-1], input_ids.shape[-1]) , )
def UpperCAmelCase_ ( self , _lowercase , _lowercase , _lowercase )-> Optional[Any]:
pt_model.to(_lowercase )
pt_model.eval()
# prepare inputs
UpperCamelCase_ = inputs_dict
UpperCamelCase_ = {k: torch.tensor(v.tolist() ) for k, v in flax_inputs.items()}
with torch.no_grad():
UpperCamelCase_ = pt_model(**_lowercase ).to_tuple()
UpperCamelCase_ = fx_model(**_lowercase ).to_tuple()
self.assertEqual(len(_lowercase ) , len(_lowercase ) , "Output lengths differ between Flax and PyTorch" )
for fx_output, pt_output in zip(fx_outputs[:4] , pt_outputs[:4] ):
self.assert_almost_equals(_lowercase , pt_output.numpy() , 4e-2 )
# PT -> Flax
with tempfile.TemporaryDirectory() as tmpdirname:
pt_model.save_pretrained(_lowercase )
UpperCamelCase_ = FlaxVisionTextDualEncoderModel.from_pretrained(_lowercase , from_pt=_lowercase )
UpperCamelCase_ = fx_model_loaded(**_lowercase ).to_tuple()
self.assertEqual(len(_lowercase ) , len(_lowercase ) , "Output lengths differ between Flax and PyTorch" )
for fx_output_loaded, pt_output in zip(fx_outputs_loaded[:4] , pt_outputs[:4] ):
self.assert_almost_equals(_lowercase , pt_output.numpy() , 4e-2 )
# Flax -> PT
with tempfile.TemporaryDirectory() as tmpdirname:
fx_model.save_pretrained(_lowercase )
UpperCamelCase_ = VisionTextDualEncoderModel.from_pretrained(_lowercase , from_flax=_lowercase )
pt_model_loaded.to(_lowercase )
pt_model_loaded.eval()
with torch.no_grad():
UpperCamelCase_ = pt_model_loaded(**_lowercase ).to_tuple()
self.assertEqual(len(_lowercase ) , len(_lowercase ) , "Output lengths differ between Flax and PyTorch" )
for fx_output, pt_output_loaded in zip(fx_outputs[:4] , pt_outputs_loaded[:4] ):
self.assert_almost_equals(_lowercase , pt_output_loaded.numpy() , 4e-2 )
def UpperCAmelCase_ ( self , _lowercase , _lowercase , _lowercase )-> Union[str, Any]:
UpperCamelCase_ = VisionTextDualEncoderConfig.from_vision_text_configs(_lowercase , _lowercase )
UpperCamelCase_ = VisionTextDualEncoderModel(_lowercase )
UpperCamelCase_ = FlaxVisionTextDualEncoderModel(_lowercase )
UpperCamelCase_ = convert_pytorch_state_dict_to_flax(pt_model.state_dict() , _lowercase )
UpperCamelCase_ = fx_state
self.check_pt_flax_equivalence(_lowercase , _lowercase , _lowercase )
def UpperCAmelCase_ ( self , _lowercase , _lowercase , _lowercase )-> List[Any]:
UpperCamelCase_ = VisionTextDualEncoderConfig.from_vision_text_configs(_lowercase , _lowercase )
UpperCamelCase_ = VisionTextDualEncoderModel(_lowercase )
UpperCamelCase_ = FlaxVisionTextDualEncoderModel(_lowercase )
UpperCamelCase_ = load_flax_weights_in_pytorch_model(_lowercase , fx_model.params )
self.check_pt_flax_equivalence(_lowercase , _lowercase , _lowercase )
def UpperCAmelCase_ ( self )-> List[str]:
UpperCamelCase_ = self.prepare_config_and_inputs()
self.check_model_from_pretrained_configs(**_lowercase )
def UpperCAmelCase_ ( self )-> Dict:
UpperCamelCase_ = self.prepare_config_and_inputs()
self.check_vision_text_dual_encoder_from_pretrained(**_lowercase )
def UpperCAmelCase_ ( self )-> Optional[Any]:
UpperCamelCase_ = self.prepare_config_and_inputs()
self.check_save_load(**_lowercase )
def UpperCAmelCase_ ( self )-> Tuple:
UpperCamelCase_ = self.prepare_config_and_inputs()
self.check_vision_text_output_attention(**_lowercase )
@is_pt_flax_cross_test
def UpperCAmelCase_ ( self )-> List[Any]:
UpperCamelCase_ = self.prepare_config_and_inputs()
UpperCamelCase_ = config_inputs_dict.pop("vision_config" )
UpperCamelCase_ = config_inputs_dict.pop("text_config" )
UpperCamelCase_ = config_inputs_dict
self.check_equivalence_pt_to_flax(_lowercase , _lowercase , _lowercase )
self.check_equivalence_flax_to_pt(_lowercase , _lowercase , _lowercase )
@slow
def UpperCAmelCase_ ( self )-> Tuple:
UpperCamelCase_ , UpperCamelCase_ = self.get_pretrained_model_and_inputs()
UpperCamelCase_ = model_a(**_lowercase )
UpperCamelCase_ = outputs[0]
with tempfile.TemporaryDirectory() as tmp_dirname:
model_a.save_pretrained(_lowercase )
UpperCamelCase_ = FlaxVisionTextDualEncoderModel.from_pretrained(_lowercase )
UpperCamelCase_ = model_a(**_lowercase )
UpperCamelCase_ = after_outputs[0]
UpperCamelCase_ = np.amax(np.abs(out_a - out_a ) )
self.assertLessEqual(_lowercase , 1e-5 )
@require_flax
class __magic_name__ ( snake_case , unittest.TestCase ):
def UpperCAmelCase_ ( self )-> Any:
UpperCamelCase_ = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(
"hf-internal-testing/tiny-random-vit" , "hf-internal-testing/tiny-bert" , vision_from_pt=_lowercase , text_from_pt=_lowercase , )
UpperCamelCase_ = 13
UpperCamelCase_ = floats_tensor(
[
batch_size,
model.config.vision_config.num_channels,
model.config.vision_config.image_size,
model.config.vision_config.image_size,
] )
UpperCamelCase_ = ids_tensor([batch_size, 4] , model.config.text_config.vocab_size )
UpperCamelCase_ = random_attention_mask([batch_size, 4] )
UpperCamelCase_ = {"pixel_values": pixel_values, "input_ids": input_ids, "attention_mask": attention_mask}
return model, inputs
def UpperCAmelCase_ ( self , _lowercase , _lowercase )-> Union[str, Any]:
UpperCamelCase_ = FlaxViTModel(_lowercase )
UpperCamelCase_ = FlaxBertModel(_lowercase )
return vision_model, text_model
def UpperCAmelCase_ ( self )-> Optional[Any]:
UpperCamelCase_ = FlaxViTModelTester(self )
UpperCamelCase_ = FlaxBertModelTester(self )
UpperCamelCase_ = vit_model_tester.prepare_config_and_inputs()
UpperCamelCase_ = bert_model_tester.prepare_config_and_inputs()
UpperCamelCase_ , UpperCamelCase_ = vision_config_and_inputs
UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ = text_config_and_inputs
# make sure that cross attention layers are added
return {
"text_config": text_config,
"vision_config": vision_config,
"pixel_values": pixel_values,
"attention_mask": attention_mask,
"input_ids": input_ids,
"token_type_ids": token_type_ids,
}
@require_torch
class __magic_name__ ( snake_case , unittest.TestCase ):
def UpperCAmelCase_ ( self )-> Optional[Any]:
UpperCamelCase_ = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(
"hf-internal-testing/tiny-random-clip" , "hf-internal-testing/tiny-bert" , vision_from_pt=_lowercase , text_from_pt=_lowercase , )
UpperCamelCase_ = 13
UpperCamelCase_ = floats_tensor(
[
batch_size,
model.config.vision_config.num_channels,
model.config.vision_config.image_size,
model.config.vision_config.image_size,
] )
UpperCamelCase_ = ids_tensor([batch_size, 4] , model.config.text_config.vocab_size )
UpperCamelCase_ = random_attention_mask([batch_size, 4] )
UpperCamelCase_ = {"pixel_values": pixel_values, "input_ids": input_ids, "attention_mask": attention_mask}
return model, inputs
def UpperCAmelCase_ ( self , _lowercase , _lowercase )-> str:
UpperCamelCase_ = FlaxCLIPVisionModel(_lowercase )
UpperCamelCase_ = FlaxBertModel(_lowercase )
return vision_model, text_model
def UpperCAmelCase_ ( self )-> Any:
UpperCamelCase_ = FlaxCLIPVisionModelTester(self )
UpperCamelCase_ = FlaxBertModelTester(self )
UpperCamelCase_ = clip_model_tester.prepare_config_and_inputs()
UpperCamelCase_ = bert_model_tester.prepare_config_and_inputs()
UpperCamelCase_ , UpperCamelCase_ = vision_config_and_inputs
UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ = text_config_and_inputs
# make sure that cross attention layers are added
return {
"text_config": text_config,
"vision_config": vision_config,
"pixel_values": pixel_values,
"attention_mask": attention_mask,
"input_ids": input_ids,
"token_type_ids": token_type_ids,
}
@require_flax
@require_vision
class __magic_name__ ( unittest.TestCase ):
@slow
def UpperCAmelCase_ ( self )-> str:
UpperCamelCase_ = FlaxVisionTextDualEncoderModel.from_pretrained("clip-italian/clip-italian" , logit_scale_init_value=1.0 )
UpperCamelCase_ = VisionTextDualEncoderProcessor.from_pretrained("clip-italian/clip-italian" )
UpperCamelCase_ = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
UpperCamelCase_ = processor(
text=["una foto di un gatto", "una foto di un cane"] , images=_lowercase , padding=_lowercase , return_tensors="np" )
UpperCamelCase_ = model(**_lowercase )
# verify the logits
self.assertEqual(outputs.logits_per_image.shape , (inputs.pixel_values.shape[0], inputs.input_ids.shape[0]) )
self.assertEqual(
outputs.logits_per_text.shape , (inputs.input_ids.shape[0], inputs.pixel_values.shape[0]) , )
UpperCamelCase_ = np.array([[1.2_284_727, 0.3_104_122]] )
self.assertTrue(np.allclose(outputs.logits_per_image , _lowercase , atol=1e-3 ) )
| 60
|
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
import torch
from ..models.auto import AutoModelForVisualQuestionAnswering, AutoProcessor
from ..utils import requires_backends
from .base import PipelineTool
if TYPE_CHECKING:
from PIL import Image
class __magic_name__ ( snake_case ):
UpperCamelCase_ :List[Any] = """dandelin/vilt-b32-finetuned-vqa"""
UpperCamelCase_ :Dict = (
"""This is a tool that answers a question about an image. It takes an input named `image` which should be the """
"""image containing the information, as well as a `question` which should be the question in English. It """
"""returns a text that is the answer to the question."""
)
UpperCamelCase_ :Optional[int] = """image_qa"""
UpperCamelCase_ :int = AutoProcessor
UpperCamelCase_ :Tuple = AutoModelForVisualQuestionAnswering
UpperCamelCase_ :Optional[int] = ["""image""", """text"""]
UpperCamelCase_ :Tuple = ["""text"""]
def __init__( self , *_lowercase , **_lowercase )-> Union[str, Any]:
requires_backends(self , ["vision"] )
super().__init__(*_lowercase , **_lowercase )
def UpperCAmelCase_ ( self , _lowercase , _lowercase )-> str:
return self.pre_processor(_lowercase , _lowercase , return_tensors="pt" )
def UpperCAmelCase_ ( self , _lowercase )-> str:
with torch.no_grad():
return self.model(**_lowercase ).logits
def UpperCAmelCase_ ( self , _lowercase )-> List[Any]:
UpperCamelCase_ = outputs.argmax(-1 ).item()
return self.model.config.idalabel[idx]
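# A hypothetical end-to-end call (image path and question are illustrative; the
# tool runs encode -> forward -> decode as defined above):
#
#   tool = __magic_name__()
#   answer = tool(image=Image.open("cat.png"), question="What animal is this?")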
| 60
| 1
|
"""simple docstring"""
import copy
import json
import os
import tempfile
from transformers import is_torch_available
from .test_configuration_utils import config_common_kwargs
class lowerCAmelCase_ ( A__ ):
'''simple docstring'''
def __init__( self , snake_case_ , snake_case_=None , snake_case_=True , snake_case_=None , **snake_case_ ) -> Optional[Any]:
__lowerCAmelCase = parent
__lowerCAmelCase = config_class
__lowerCAmelCase = has_text_modality
__lowerCAmelCase = kwargs
__lowerCAmelCase = common_properties
def A__ ( self ) -> int:
__lowerCAmelCase = self.config_class(**self.inputs_dict )
__lowerCAmelCase = (
["""hidden_size""", """num_attention_heads""", """num_hidden_layers"""]
if self.common_properties is None
else self.common_properties
)
# Add common fields for text models
if self.has_text_modality:
common_properties.extend(["""vocab_size"""] )
# Test that config has the common properties as getters
for prop in common_properties:
self.parent.assertTrue(hasattr(snake_case_ , snake_case_ ) , msg=f"""`{prop}` does not exist""" )
        # Test that config has the common properties as setters
for idx, name in enumerate(snake_case_ ):
try:
setattr(snake_case_ , snake_case_ , snake_case_ )
self.parent.assertEqual(
getattr(snake_case_ , snake_case_ ) , snake_case_ , msg=f"""`{name} value {idx} expected, but was {getattr(snake_case_ , snake_case_ )}""" )
except NotImplementedError:
# Some models might not be able to implement setters for common_properties
# In that case, a NotImplementedError is raised
pass
# Test if config class can be called with Config(prop_name=..)
for idx, name in enumerate(snake_case_ ):
try:
__lowerCAmelCase = self.config_class(**{name: idx} )
self.parent.assertEqual(
getattr(snake_case_ , snake_case_ ) , snake_case_ , msg=f"""`{name} value {idx} expected, but was {getattr(snake_case_ , snake_case_ )}""" )
except NotImplementedError:
# Some models might not be able to implement setters for common_properties
# In that case, a NotImplementedError is raised
pass
def A__ ( self ) -> Optional[Any]:
__lowerCAmelCase = self.config_class(**self.inputs_dict )
__lowerCAmelCase = json.loads(config.to_json_string() )
for key, value in self.inputs_dict.items():
self.parent.assertEqual(obj[key] , snake_case_ )
def A__ ( self ) -> str:
__lowerCAmelCase = self.config_class(**self.inputs_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
__lowerCAmelCase = os.path.join(snake_case_ , """config.json""" )
config_first.to_json_file(snake_case_ )
__lowerCAmelCase = self.config_class.from_json_file(snake_case_ )
self.parent.assertEqual(config_second.to_dict() , config_first.to_dict() )
def A__ ( self ) -> str:
__lowerCAmelCase = self.config_class(**self.inputs_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
config_first.save_pretrained(snake_case_ )
__lowerCAmelCase = self.config_class.from_pretrained(snake_case_ )
self.parent.assertEqual(config_second.to_dict() , config_first.to_dict() )
def A__ ( self ) -> int:
__lowerCAmelCase = self.config_class(**self.inputs_dict )
__lowerCAmelCase = """test"""
with tempfile.TemporaryDirectory() as tmpdirname:
__lowerCAmelCase = os.path.join(snake_case_ , snake_case_ )
config_first.save_pretrained(snake_case_ )
__lowerCAmelCase = self.config_class.from_pretrained(snake_case_ , subfolder=snake_case_ )
self.parent.assertEqual(config_second.to_dict() , config_first.to_dict() )
def A__ ( self ) -> int:
__lowerCAmelCase = self.config_class(**self.inputs_dict , num_labels=5 )
self.parent.assertEqual(len(config.idalabel ) , 5 )
self.parent.assertEqual(len(config.labelaid ) , 5 )
__lowerCAmelCase = 3
self.parent.assertEqual(len(config.idalabel ) , 3 )
self.parent.assertEqual(len(config.labelaid ) , 3 )
def A__ ( self ) -> Tuple:
if self.config_class.is_composition:
return
__lowerCAmelCase = self.config_class()
self.parent.assertIsNotNone(snake_case_ )
def A__ ( self ) -> List[Any]:
__lowerCAmelCase = copy.deepcopy(snake_case_ )
__lowerCAmelCase = self.config_class(**snake_case_ )
__lowerCAmelCase = []
for key, value in config_common_kwargs.items():
if key == "torch_dtype":
if not is_torch_available():
continue
else:
import torch
if config.torch_dtype != torch.floataa:
wrong_values.append(("""torch_dtype""", config.torch_dtype, torch.floataa) )
elif getattr(snake_case_ , snake_case_ ) != value:
wrong_values.append((key, getattr(snake_case_ , snake_case_ ), value) )
if len(snake_case_ ) > 0:
__lowerCAmelCase = """\n""".join([f"""- {v[0]}: got {v[1]} instead of {v[2]}""" for v in wrong_values] )
raise ValueError(f"""The following keys were not properly set in the config:\n{errors}""" )
def A__ ( self ) -> str:
self.create_and_test_config_common_properties()
self.create_and_test_config_to_json_string()
self.create_and_test_config_to_json_file()
self.create_and_test_config_from_and_save_pretrained()
self.create_and_test_config_from_and_save_pretrained_subfolder()
self.create_and_test_config_with_num_labels()
self.check_config_can_be_init_without_params()
self.check_config_arguments_init()
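# A minimal usage sketch (BertConfig is only an illustration; upstream this
# helper is ConfigTester and the final method above is run_common_tests):
#
#   tester = ConfigTester(self, config_class=BertConfig, hidden_size=37)
#   tester.run_common_tests()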
| 301
|
"""simple docstring"""
import argparse
import datetime
import json
import time
import warnings
from logging import getLogger
from pathlib import Path
from typing import Dict, List
import torch
from tqdm import tqdm
from transformers import AutoModelForSeqaSeqLM, AutoTokenizer
from utils import calculate_bleu, calculate_rouge, chunks, parse_numeric_n_bool_cl_kwargs, use_task_specific_params
SCREAMING_SNAKE_CASE_ = getLogger(__name__)
SCREAMING_SNAKE_CASE_ = '''cuda''' if torch.cuda.is_available() else '''cpu'''
def lowercase (_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase = 8 , _lowerCAmelCase = DEFAULT_DEVICE , _lowerCAmelCase=False , _lowerCAmelCase="summarization" , _lowerCAmelCase=None , **_lowerCAmelCase , ):
__lowerCAmelCase = Path(_lowerCAmelCase ).open("""w""" , encoding="""utf-8""" )
__lowerCAmelCase = str(_lowerCAmelCase )
__lowerCAmelCase = AutoModelForSeqaSeqLM.from_pretrained(_lowerCAmelCase ).to(_lowerCAmelCase )
if fpaa:
__lowerCAmelCase = model.half()
__lowerCAmelCase = AutoTokenizer.from_pretrained(_lowerCAmelCase )
logger.info(f"""Inferred tokenizer type: {tokenizer.__class__}""" ) # if this is wrong, check config.model_type.
__lowerCAmelCase = time.time()
# update config with task specific params
use_task_specific_params(_lowerCAmelCase , _lowerCAmelCase )
if prefix is None:
__lowerCAmelCase = prefix or getattr(model.config , """prefix""" , """""" ) or """"""
for examples_chunk in tqdm(list(chunks(_lowerCAmelCase , _lowerCAmelCase ) ) ):
__lowerCAmelCase = [prefix + text for text in examples_chunk]
__lowerCAmelCase = tokenizer(_lowerCAmelCase , return_tensors="""pt""" , truncation=_lowerCAmelCase , padding="""longest""" ).to(_lowerCAmelCase )
__lowerCAmelCase = model.generate(
input_ids=batch.input_ids , attention_mask=batch.attention_mask , **_lowerCAmelCase , )
__lowerCAmelCase = tokenizer.batch_decode(_lowerCAmelCase , skip_special_tokens=_lowerCAmelCase , clean_up_tokenization_spaces=_lowerCAmelCase )
for hypothesis in dec:
fout.write(hypothesis + """\n""" )
fout.flush()
fout.close()
__lowerCAmelCase = int(time.time() - start_time ) # seconds
__lowerCAmelCase = len(_lowerCAmelCase )
return {"n_obs": n_obs, "runtime": runtime, "seconds_per_sample": round(runtime / n_obs , 4 )}
def lowercase ():
return datetime.datetime.now().strftime("""%Y-%m-%d %H:%M:%S""" )
def lowercase (_lowerCAmelCase=True ):
__lowerCAmelCase = argparse.ArgumentParser()
parser.add_argument("""model_name""" , type=_lowerCAmelCase , help="""like facebook/bart-large-cnn,t5-base, etc.""" )
parser.add_argument("""input_path""" , type=_lowerCAmelCase , help="""like cnn_dm/test.source""" )
parser.add_argument("""save_path""" , type=_lowerCAmelCase , help="""where to save summaries""" )
parser.add_argument("""--reference_path""" , type=_lowerCAmelCase , required=_lowerCAmelCase , help="""like cnn_dm/test.target""" )
parser.add_argument("""--score_path""" , type=_lowerCAmelCase , required=_lowerCAmelCase , default="""metrics.json""" , help="""where to save metrics""" )
parser.add_argument("""--device""" , type=_lowerCAmelCase , required=_lowerCAmelCase , default=_lowerCAmelCase , help="""cuda, cuda:1, cpu etc.""" )
parser.add_argument(
"""--prefix""" , type=_lowerCAmelCase , required=_lowerCAmelCase , default=_lowerCAmelCase , help="""will be added to the begininng of src examples""" )
parser.add_argument("""--task""" , type=_lowerCAmelCase , default="""summarization""" , help="""used for task_specific_params + metrics""" )
parser.add_argument("""--bs""" , type=_lowerCAmelCase , default=8 , required=_lowerCAmelCase , help="""batch size""" )
parser.add_argument(
"""--n_obs""" , type=_lowerCAmelCase , default=-1 , required=_lowerCAmelCase , help="""How many observations. Defaults to all.""" )
parser.add_argument("""--fp16""" , action="""store_true""" )
parser.add_argument("""--dump-args""" , action="""store_true""" , help="""print the custom hparams with the results""" )
parser.add_argument(
"""--info""" , nargs="""?""" , type=_lowerCAmelCase , const=datetime_now() , help=(
"""use in conjunction w/ --dump-args to print with the results whatever other info you'd like, e.g."""
""" lang=en-ru. If no value is passed, the current datetime string will be used."""
) , )
# Unspecified args like --num_beams=2 --decoder_start_token_id=4 are passed to model.generate
__lowerCAmelCase , __lowerCAmelCase = parser.parse_known_args()
__lowerCAmelCase = parse_numeric_n_bool_cl_kwargs(_lowerCAmelCase )
if parsed_args and verbose:
print(f"""parsed the following generate kwargs: {parsed_args}""" )
__lowerCAmelCase = [""" """ + x.rstrip() if """t5""" in args.model_name else x.rstrip() for x in open(args.input_path ).readlines()]
if args.n_obs > 0:
__lowerCAmelCase = examples[: args.n_obs]
Path(args.save_path ).parent.mkdir(exist_ok=_lowerCAmelCase )
if args.reference_path is None and Path(args.score_path ).exists():
warnings.warn(f"""score_path {args.score_path} will be overwritten unless you type ctrl-c.""" )
if args.device == "cpu" and args.fpaa:
# this mix leads to RuntimeError: "threshold_cpu" not implemented for 'Half'
raise ValueError("""Can't mix --fp16 and --device cpu""" )
__lowerCAmelCase = generate_summaries_or_translations(
_lowerCAmelCase , args.save_path , args.model_name , batch_size=args.bs , device=args.device , fpaa=args.fpaa , task=args.task , prefix=args.prefix , **_lowerCAmelCase , )
if args.reference_path is None:
return {}
# Compute scores
__lowerCAmelCase = calculate_bleu if """translation""" in args.task else calculate_rouge
__lowerCAmelCase = [x.rstrip() for x in open(args.save_path ).readlines()]
__lowerCAmelCase = [x.rstrip() for x in open(args.reference_path ).readlines()][: len(_lowerCAmelCase )]
__lowerCAmelCase = score_fn(_lowerCAmelCase , _lowerCAmelCase )
scores.update(_lowerCAmelCase )
if args.dump_args:
scores.update(_lowerCAmelCase )
if args.info:
__lowerCAmelCase = args.info
if verbose:
print(_lowerCAmelCase )
if args.score_path is not None:
json.dump(_lowerCAmelCase , open(args.score_path , """w""" ) )
return scores
if __name__ == "__main__":
# Usage for MT:
# python run_eval.py MODEL_NAME $DATA_DIR/test.source $save_dir/test_translations.txt --reference_path $DATA_DIR/test.target --score_path $save_dir/test_bleu.json --task translation $@
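    # A summarization run would look similar (paths are illustrative):
    # python run_eval.py facebook/bart-large-cnn $DATA_DIR/test.source $save_dir/test_sums.txt --reference_path $DATA_DIR/test.target --score_path $save_dir/test_rouge.json --task summarization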
run_generate(verbose=True)
| 301
| 1
|
import logging
import os
from logging import (
CRITICAL, # NOQA
DEBUG, # NOQA
ERROR, # NOQA
FATAL, # NOQA
INFO, # NOQA
NOTSET, # NOQA
WARN, # NOQA
WARNING, # NOQA
)
from typing import Optional
from tqdm import auto as tqdm_lib
lowerCamelCase : Dict ={
'''debug''': logging.DEBUG,
'''info''': logging.INFO,
'''warning''': logging.WARNING,
'''error''': logging.ERROR,
'''critical''': logging.CRITICAL,
}
lowerCamelCase : List[Any] =logging.WARNING
def SCREAMING_SNAKE_CASE ( ) -> int:
UpperCamelCase__ : int = os.getenv("DATASETS_VERBOSITY" , __lowerCAmelCase )
if env_level_str:
if env_level_str in log_levels:
return log_levels[env_level_str]
else:
logging.getLogger().warning(
f'Unknown option DATASETS_VERBOSITY={env_level_str}, '
f'has to be one of: { ", ".join(log_levels.keys() ) }' )
return _default_log_level
def SCREAMING_SNAKE_CASE ( ) -> str:
return __name__.split("." )[0]
def SCREAMING_SNAKE_CASE ( ) -> logging.Logger:
return logging.getLogger(_get_library_name() )
def SCREAMING_SNAKE_CASE ( ) -> None:
# Apply our default configuration to the library root logger.
UpperCamelCase__ : Optional[Any] = _get_library_root_logger()
library_root_logger.setLevel(_get_default_logging_level() )
def SCREAMING_SNAKE_CASE ( ) -> None:
UpperCamelCase__ : Any = _get_library_root_logger()
library_root_logger.setLevel(logging.NOTSET )
def SCREAMING_SNAKE_CASE ( __lowerCAmelCase = None ) -> logging.Logger:
if name is None:
UpperCamelCase__ : List[Any] = _get_library_name()
return logging.getLogger(__lowerCAmelCase )
def SCREAMING_SNAKE_CASE ( ) -> int:
return _get_library_root_logger().getEffectiveLevel()
def SCREAMING_SNAKE_CASE ( __lowerCAmelCase ) -> None:
_get_library_root_logger().setLevel(__lowerCAmelCase )
def SCREAMING_SNAKE_CASE ( ) -> Union[str, Any]:
return set_verbosity(__lowerCAmelCase )
def SCREAMING_SNAKE_CASE ( ) -> Tuple:
return set_verbosity(__lowerCAmelCase )
def SCREAMING_SNAKE_CASE ( ) -> List[Any]:
return set_verbosity(__lowerCAmelCase )
def SCREAMING_SNAKE_CASE ( ) -> Union[str, Any]:
return set_verbosity(__lowerCAmelCase )
def SCREAMING_SNAKE_CASE ( ) -> None:
UpperCamelCase__ : List[str] = False
def SCREAMING_SNAKE_CASE ( ) -> None:
UpperCamelCase__ : str = True
# Configure the library root logger at the module level (singleton-like)
_configure_library_root_logger()
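# No-op stand-in for tqdm: it iterates the wrapped iterable unchanged and
# swallows every other attribute access; returned when progress bars are
# disabled.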
class EmptyTqdm:
    """Dummy tqdm which doesn't do anything."""
    def __init__(self, *args, **kwargs):  # pylint: disable=unused-argument
        self._iterator = args[0] if args else None
    def __iter__(self):
        return iter(self._iterator)
    def __getattr__(self, _):
        """Return an empty function for any attribute lookup."""
        def empty_fn(*args, **kwargs):  # pylint: disable=unused-argument
            return
        return empty_fn
    def __enter__(self):
        return self
    def __exit__(self, type_, value, traceback):
        return
_tqdm_active = True
class _tqdm_cls:
    def __call__(self, *args, disable=False, **kwargs):
        if _tqdm_active and not disable:
            return tqdm_lib.tqdm(*args, **kwargs)
        else:
            return EmptyTqdm(*args, **kwargs)
    def set_lock(self, *args, **kwargs):
        self._lock = None
        if _tqdm_active:
            return tqdm_lib.tqdm.set_lock(*args, **kwargs)
    def get_lock(self):
        if _tqdm_active:
            return tqdm_lib.tqdm.get_lock()
tqdm = _tqdm_cls()
def is_progress_bar_enabled() -> bool:
    """Return a boolean indicating whether tqdm progress bars are enabled."""
    global _tqdm_active
    return bool(_tqdm_active)
def enable_progress_bar() -> None:
    """Enable tqdm progress bars."""
    global _tqdm_active
    _tqdm_active = True
def disable_progress_bar() -> None:
    """Disable tqdm progress bars."""
    global _tqdm_active
    _tqdm_active = False
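# Example (illustrative): the module-level `tqdm` wrapper above can be toggled globally,
# which is useful around bulk operations whose per-item bars would flood the console.
#
#     from datasets.utils.logging import disable_progress_bar, enable_progress_bar, is_progress_bar_enabled
#
#     disable_progress_bar()
#     assert not is_progress_bar_enabled()
#     # ... bulk work: any bar created via this module's `tqdm` is now a no-op EmptyTqdm ...
#     enable_progress_bar()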
import argparse
import os
import re
import packaging.version
PATH_TO_EXAMPLES = "examples/"
REPLACE_PATTERNS = {
    "examples": (re.compile(r'^check_min_version\("[^"]+"\)\s*$', re.MULTILINE), 'check_min_version("VERSION")\n'),
    "init": (re.compile(r'^__version__\s+=\s+"([^"]+)"\s*$', re.MULTILINE), '__version__ = "VERSION"\n'),
    "setup": (re.compile(r'^(\s*)version\s*=\s*"[^"]+",', re.MULTILINE), r'\1version="VERSION",'),
    "doc": (re.compile(r'^(\s*)release\s*=\s*"[^"]+"$', re.MULTILINE), 'release = "VERSION"\n'),
}
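# Illustration (hypothetical versions): with version="4.27.1", the "init" pattern above turns
#     __version__ = "4.27.0.dev0"
# into
#     __version__ = "4.27.1"
# once "VERSION" in the replacement template is substituted (see update_version_in_file below).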
REPLACE_FILES = {
    "init": "src/transformers/__init__.py",
    "setup": "setup.py",
}
README_FILE = "README.md"
def update_version_in_file(fname, version, pattern):
    """Update the version in one file using a specific pattern."""
    with open(fname, "r", encoding="utf-8", newline="\n") as f:
        code = f.read()
    re_pattern, replace = REPLACE_PATTERNS[pattern]
    replace = replace.replace("VERSION", version)
    code = re_pattern.sub(replace, code)
    with open(fname, "w", encoding="utf-8", newline="\n") as f:
        f.write(code)
def update_version_in_examples(version):
    """Update the version in all examples files."""
    for folder, directories, fnames in os.walk(PATH_TO_EXAMPLES):
        # Removing some of the folders with non-actively maintained examples from the walk
        if "research_projects" in directories:
            directories.remove("research_projects")
        if "legacy" in directories:
            directories.remove("legacy")
        for fname in fnames:
            if fname.endswith(".py"):
                update_version_in_file(os.path.join(folder, fname), version, pattern="examples")
def global_version_update(version, patch=False):
    """Update the version in all needed files."""
    for pattern, fname in REPLACE_FILES.items():
        update_version_in_file(fname, version, pattern)
    if not patch:
        update_version_in_examples(version)
def clean_main_ref_in_model_list():
    """Replace the links from main doc to stable doc in the model list of the README."""
    _start_prompt = "🤗 Transformers currently provides the following architectures"
    _end_prompt = "1. Want to contribute a new model?"
    with open(README_FILE, "r", encoding="utf-8", newline="\n") as f:
        lines = f.readlines()
    # Find the start of the list.
    start_index = 0
    while not lines[start_index].startswith(_start_prompt):
        start_index += 1
    start_index += 1
    index = start_index
    # Update the lines in the model list.
    while not lines[index].startswith(_end_prompt):
        if lines[index].startswith("1."):
            lines[index] = lines[index].replace(
                "https://huggingface.co/docs/transformers/main/model_doc",
                "https://huggingface.co/docs/transformers/model_doc",
            )
        index += 1
    with open(README_FILE, "w", encoding="utf-8", newline="\n") as f:
        f.writelines(lines)
def get_version():
    """Read the current version from the library's __init__."""
    with open(REPLACE_FILES["init"], "r") as f:
        code = f.read()
    default_version = REPLACE_PATTERNS["init"][0].search(code).groups()[0]
    return packaging.version.parse(default_version)
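# Note (illustrative): `packaging.version.parse("4.27.0.dev0")` yields a Version whose
# `.is_devrelease` is True and whose `.base_version` is "4.27.0"; the release helpers below
# rely on exactly those attributes to decide how to bump.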
def pre_release_work(patch=False):
    """Do all the necessary pre-release steps."""
    # First let's get the default version: base version if we are in dev, bump minor otherwise.
    default_version = get_version()
    if patch and default_version.is_devrelease:
        raise ValueError("Can't create a patch version from the dev branch, checkout a released version!")
    if default_version.is_devrelease:
        default_version = default_version.base_version
    elif patch:
        default_version = f"{default_version.major}.{default_version.minor}.{default_version.micro + 1}"
    else:
        default_version = f"{default_version.major}.{default_version.minor + 1}.0"
    # Now let's ask nicely if that's the right one.
    version = input(f"Which version are you releasing? [{default_version}]")
    if len(version) == 0:
        version = default_version
    print(f"Updating version to {version}.")
    global_version_update(version, patch=patch)
    if not patch:
        print("Cleaning main README, don't forget to run `make fix-copies`.")
        clean_main_ref_in_model_list()
def post_release_work():
    """Do all the necessary post-release steps."""
    # First let's get the current version.
    current_version = get_version()
    dev_version = f"{current_version.major}.{current_version.minor + 1}.0.dev0"
    current_version = current_version.base_version
    # Check with the user we got that right.
    version = input(f"Which version are we developing now? [{dev_version}]")
    if len(version) == 0:
        version = dev_version
    print(f"Updating version to {version}.")
    global_version_update(version)
    print("Cleaning main README, don't forget to run `make fix-copies`.")
    clean_main_ref_in_model_list()
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--post_release", action="store_true", help="Whether this is pre or post release.")
    parser.add_argument("--patch", action="store_true", help="Whether or not this is a patch release.")
    args = parser.parse_args()
if not args.post_release:
pre_release_work(patch=args.patch)
elif args.patch:
print('''Nothing to do after a patch :-)''')
else:
post_release_work()
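# Typical invocations (illustrative; the script path is an assumption):
#   python utils/release.py                  # pre-release: bump to the next minor version
#   python utils/release.py --patch          # pre-release: bump the micro/patch version
#   python utils/release.py --post_release   # post-release: move back to a .dev0 version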